author    Linus Torvalds <torvalds@linux-foundation.org>  2011-05-27 19:52:57 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-05-27 19:52:57 -0700
commit    426048313dfa7d65dbd2379b1665755511f9544f (patch)
tree      dc727b9e41eb3d9dfe8e68f14b027c776d8aba98
parent    2a56d2220284b0e4dd8569fa475d7053f1c40a63 (diff)
parent    7ad20aa9d39a525542b0840ac38bfc77be831e19 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (60 commits)
  [SCSI] lpfc 8.3.24: Extend BSG infrastructure and add link diagnostics
  [SCSI] lpfc 8.3.24: Add resource extent support
  [SCSI] lpfc 8.3.24: Add request-firmware support
  [SCSI] lpfc 8.3.24: Add SR-IOV control
  [SCSI] lpfc 8.3.24: Extended hardware support and support dump images
  [SCSI] lpfc 8.3.24: Miscellaneous Fixes and Corrections
  [SCSI] libsas: Add option for SATA soft reset
  [SCSI] libsas: check dev->gone before submitting sata i/o
  [SCSI] libsas: fix/amend device gone notification in sas_deform_port()
  [SCSI] MAINTAINERS update for SCSI (new email address)
  [SCSI] Fix Ultrastor asm snippet
  [SCSI] osst: fix warning
  [SCSI] osst: wrong index used in inner loop
  [SCSI] aic94xx: world-writable sysfs update_bios file
  [SCSI] MAINTAINERS: Add drivers/target/ entry
  [SCSI] target: Convert TASK_ATTR to scsi_tcq.h definitions
  [SCSI] target: Convert REPORT_LUNs to use int_to_scsilun
  [SCSI] target: Fix task->task_execute_queue=1 clear bug + LUN_RESET OOPs
  [SCSI] target: Fix bug with task_sg chained transport_free_dev_tasks release
  [SCSI] target: Fix interrupt context bug with stats_lock and core_tmr_alloc_req
  ...
-rw-r--r--  Documentation/scsi/ChangeLog.megaraid_sas | 14
-rw-r--r--  MAINTAINERS | 13
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 2
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c | 4
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.h | 1
-rw-r--r--  drivers/scsi/bfa/bfa_ioc_cb.c | 11
-rw-r--r--  drivers/scsi/bfa/bfa_ioc_ct.c | 26
-rw-r--r--  drivers/scsi/bnx2i/bnx2i.h | 16
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 27
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_init.c | 2
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 13
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 58
-rw-r--r--  drivers/scsi/fcoe/fcoe.h | 10
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 133
-rw-r--r--  drivers/scsi/fcoe/fcoe_transport.c | 40
-rw-r--r--  drivers/scsi/ipr.c | 12
-rw-r--r--  drivers/scsi/libfc/fc_disc.c | 1
-rw-r--r--  drivers/scsi/libfc/fc_exch.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 16
-rw-r--r--  drivers/scsi/libfc/fc_libfc.h | 1
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 60
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 2
-rw-r--r--  drivers/scsi/libsas/sas_phy.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_port.c | 21
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 43
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 312
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 2111
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h | 87
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 28
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 54
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 501
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 545
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 166
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 13
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 28
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 56
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 1659
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 33
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 10
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 93
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 83
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h | 4
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 102
-rw-r--r--  drivers/scsi/osst.c | 6
-rw-r--r--  drivers/scsi/qla4xxx/Makefile | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_attr.c | 69
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 11
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h | 23
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h | 3
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 22
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 77
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 19
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 68
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h | 2
-rw-r--r--  drivers/scsi/scsi_error.c | 87
-rw-r--r--  drivers/scsi/scsi_trace.c | 4
-rw-r--r--  drivers/scsi/sd.c | 82
-rw-r--r--  drivers/scsi/ultrastor.c | 2
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 25
-rw-r--r--  drivers/target/target_core_configfs.c | 4
-rw-r--r--  drivers/target/target_core_device.c | 29
-rw-r--r--  drivers/target/target_core_pscsi.c | 4
-rw-r--r--  drivers/target/target_core_tmr.c | 7
-rw-r--r--  drivers/target/target_core_transport.c | 68
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 20
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c | 8
-rw-r--r--  include/scsi/libsas.h | 1
-rw-r--r--  include/scsi/scsi_tcq.h | 1
-rw-r--r--  include/target/target_core_base.h | 1
-rw-r--r--  include/target/target_core_fabric_ops.h | 1
-rw-r--r--  include/target/target_core_transport.h | 1
79 files changed, 5888 insertions, 1225 deletions
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 4d9ce73ff730..9ed1d9d96783 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,17 @@
+Release Date : Wed. May 11, 2011 17:00:00 PST 2010 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Adam Radford
+Current Version : 00.00.05.38-rc1
+Old Version : 00.00.05.34-rc1
+ 1. Remove MSI-X black list, use MFI_REG_STATE.ready.msiEnable.
+ 2. Remove un-used function megasas_return_cmd_for_smid().
+ 3. Check MFI_REG_STATE.fault.resetAdapter in megasas_reset_fusion().
+ 4. Disable interrupts/free_irq() in megasas_shutdown().
+ 5. Fix bug where AENs could be lost in probe() and resume().
+ 6. Convert 6,10,12 byte CDB's to 16 byte CDB for large LBA's for FastPath
+ IO.
+ 7. Add 1078 OCR support.
+-------------------------------------------------------------------------------
Release Date : Thu. Feb 24, 2011 17:00:00 PST 2010 -
(emaild-id:megaraidlinux@lsi.com)
Adam Radford
diff --git a/MAINTAINERS b/MAINTAINERS
index a33b11560d3f..b9f5aee36375 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5515,7 +5515,7 @@ F: drivers/scsi/sg.c
F: include/scsi/sg.h
SCSI SUBSYSTEM
-M: "James E.J. Bottomley" <James.Bottomley@suse.de>
+M: "James E.J. Bottomley" <JBottomley@parallels.com>
L: linux-scsi@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6.git
@@ -6084,6 +6084,17 @@ F: Documentation/filesystems/sysv-fs.txt
F: fs/sysv/
F: include/linux/sysv_fs.h
+TARGET SUBSYSTEM
+M: Nicholas A. Bellinger <nab@linux-iscsi.org>
+L: linux-scsi@vger.kernel.org
+L: http://groups.google.com/group/linux-iscsi-target-dev
+W: http://www.linux-iscsi.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core-2.6.git master
+S: Supported
+F: drivers/target/
+F: include/target/
+F: Documentation/target/
+
TASKSTATS STATISTICS INTERFACE
M: Balbir Singh <balbir@linux.vnet.ibm.com>
S: Maintained
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 3b7e83d2dab4..d5ff142c93a2 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
flash_error_table[i].reason);
}
-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
asd_show_update_bios, asd_store_update_bios);
static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index c1f72c49196f..6c7e0339dda4 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -56,6 +56,8 @@ BFA_TRC_FILE(CNA, IOC);
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
@@ -647,7 +649,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_firmware_lock(ioc)) {
- if (bfa_ioc_sync_complete(ioc)) {
+ if (bfa_ioc_sync_start(ioc)) {
iocpf->retry_count = 0;
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index ec9cf08b0e7f..c85182a704fb 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -263,6 +263,7 @@ struct bfa_ioc_hwif_s {
bfa_boolean_t msix);
void (*ioc_notify_fail) (struct bfa_ioc_s *ioc);
void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc);
+ bfa_boolean_t (*ioc_sync_start) (struct bfa_ioc_s *ioc);
void (*ioc_sync_join) (struct bfa_ioc_s *ioc);
void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index e4a0713185b6..89ae4c8f95a2 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -32,6 +32,7 @@ static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
@@ -53,6 +54,7 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
+ hwif_cb.ioc_sync_start = bfa_ioc_cb_sync_start;
hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
@@ -195,6 +197,15 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
}
/*
+ * Synchronized IOC failure processing routines
+ */
+static bfa_boolean_t
+bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc)
+{
+ return bfa_ioc_cb_sync_complete(ioc);
+}
+
+/*
* Cleanup hw semaphore and usecnt registers
*/
static void
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 008d129ddfcd..93612520f0d2 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
@@ -62,6 +63,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+ hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -351,6 +353,30 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
writel(1, ioc->ioc_regs.ioc_sem_reg);
}
+static bfa_boolean_t
+bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
+{
+ uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+ /*
+ * Driver load time. If the sync required bit for this PCI fn
+ * is set, it is due to an unclean exit by the driver for this
+ * PCI fn in the previous incarnation. Whoever comes here first
+ * should clean it up, no matter which PCI fn.
+ */
+
+ if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+ writel(0, ioc->ioc_regs.ioc_fail_sync);
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ return BFA_TRUE;
+ }
+
+ return bfa_ioc_ct_sync_complete(ioc);
+}
+
/*
* Synchronized IOC failure processing routines
*/
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index cfd59023227b..6bdd25a93db9 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -66,11 +66,11 @@
#define BD_SPLIT_SIZE 32768
/* min, max & default values for SQ/RQ/CQ size, configurable via' modparam */
-#define BNX2I_SQ_WQES_MIN 16
-#define BNX2I_570X_SQ_WQES_MAX 128
-#define BNX2I_5770X_SQ_WQES_MAX 512
-#define BNX2I_570X_SQ_WQES_DEFAULT 128
-#define BNX2I_5770X_SQ_WQES_DEFAULT 256
+#define BNX2I_SQ_WQES_MIN 16
+#define BNX2I_570X_SQ_WQES_MAX 128
+#define BNX2I_5770X_SQ_WQES_MAX 512
+#define BNX2I_570X_SQ_WQES_DEFAULT 128
+#define BNX2I_5770X_SQ_WQES_DEFAULT 128
#define BNX2I_570X_CQ_WQES_MAX 128
#define BNX2I_5770X_CQ_WQES_MAX 512
@@ -115,6 +115,7 @@
#define BNX2X_MAX_CQS 8
#define CNIC_ARM_CQE 1
+#define CNIC_ARM_CQE_FP 2
#define CNIC_DISARM_CQE 0
#define REG_RD(__hba, offset) \
@@ -666,7 +667,9 @@ enum {
* after HBA reset is completed by bnx2i/cnic/bnx2
* modules
* @state: tracks offload connection state machine
- * @teardown_mode: indicates if conn teardown is abortive or orderly
+ * @timestamp: tracks the start time when the ep begins to connect
+ * @num_active_cmds: tracks the number of outstanding commands for this ep
+ * @ec_shift: the amount of shift as part of the event coal calc
* @qp: QP information
* @ids: contains chip allocated *context id* & driver assigned
* *iscsi cid*
@@ -685,6 +688,7 @@ struct bnx2i_endpoint {
u32 state;
unsigned long timestamp;
int num_active_cmds;
+ u32 ec_shift;
struct qp_info qp;
struct ep_handles ids;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index f0b89513faed..5c54a2d9b834 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -138,7 +138,6 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
u16 next_index;
u32 num_active_cmds;
-
/* Coalesce CQ entries only on 10G devices */
if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
return;
@@ -148,16 +147,19 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
* interrupts and other unwanted results
*/
cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
- if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
- return;
- if (action == CNIC_ARM_CQE) {
+ if (action != CNIC_ARM_CQE_FP)
+ if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
+ return;
+
+ if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) {
num_active_cmds = ep->num_active_cmds;
if (num_active_cmds <= event_coal_min)
next_index = 1;
else
next_index = event_coal_min +
- (num_active_cmds - event_coal_min) / event_coal_div;
+ ((num_active_cmds - event_coal_min) >>
+ ep->ec_shift);
if (!next_index)
next_index = 1;
cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
@@ -1274,6 +1276,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
iscsi_init.dummy_buffer_addr_hi =
(u32) ((u64) hba->dummy_buf_dma >> 32);
+ hba->num_ccell = hba->max_sqes >> 1;
hba->ctx_ccell_tasks =
((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
iscsi_init.num_ccells_per_conn = hba->num_ccell;
@@ -1934,7 +1937,6 @@ cqe_out:
qp->cq_cons_idx++;
}
}
- bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
}
/**
@@ -1948,22 +1950,23 @@ cqe_out:
static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
struct iscsi_kcqe *new_cqe_kcqe)
{
- struct bnx2i_conn *conn;
+ struct bnx2i_conn *bnx2i_conn;
u32 iscsi_cid;
iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
- conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+ bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
- if (!conn) {
+ if (!bnx2i_conn) {
printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
return;
}
- if (!conn->ep) {
+ if (!bnx2i_conn->ep) {
printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
return;
}
-
- bnx2i_process_new_cqes(conn);
+ bnx2i_process_new_cqes(bnx2i_conn);
+ bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
+ bnx2i_process_new_cqes(bnx2i_conn);
}
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 1d24a2819736..6adbdc34a9a5 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -244,7 +244,7 @@ void bnx2i_stop(void *handle)
wait_event_interruptible_timeout(hba->eh_wait,
(list_empty(&hba->ep_ofld_list) &&
list_empty(&hba->ep_destroy_list)),
- 10 * HZ);
+ 2 * HZ);
/* Wait for all endpoints to be torn down, Chip will be reset once
* control returns to network driver. So it is required to cleanup and
* release all connection resources before returning from this routine.
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1809f9ccc4ce..041928b23cb0 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -379,6 +379,7 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
{
struct iscsi_endpoint *ep;
struct bnx2i_endpoint *bnx2i_ep;
+ u32 ec_div;
ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
if (!ep) {
@@ -393,6 +394,11 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
bnx2i_ep->ep_iscsi_cid = (u16) -1;
bnx2i_ep->hba = hba;
bnx2i_ep->hba_age = hba->age;
+
+ ec_div = event_coal_div;
+ while (ec_div >>= 1)
+ bnx2i_ep->ec_shift += 1;
+
hba->ofld_conns_active++;
init_waitqueue_head(&bnx2i_ep->ofld_wait);
return ep;
@@ -858,7 +864,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
mutex_init(&hba->net_dev_lock);
init_waitqueue_head(&hba->eh_wait);
if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
- hba->hba_shutdown_tmo = 20 * HZ;
+ hba->hba_shutdown_tmo = 30 * HZ;
hba->conn_teardown_tmo = 20 * HZ;
hba->conn_ctx_destroy_tmo = 6 * HZ;
} else { /* 5706/5708/5709 */
@@ -1208,6 +1214,9 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
struct bnx2i_cmd *cmd = task->dd_data;
struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
+ if (bnx2i_conn->ep->num_active_cmds + 1 > hba->max_sqes)
+ return -ENOMEM;
+
/*
* If there is no scsi_cmnd this must be a mgmt task
*/
@@ -2156,7 +2165,7 @@ static struct scsi_host_template bnx2i_host_template = {
.change_queue_depth = iscsi_change_queue_depth,
.can_queue = 1024,
.max_sectors = 127,
- .cmd_per_lun = 32,
+ .cmd_per_lun = 24,
.this_id = -1,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cc23bd9480b2..155d7b9bdeae 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -137,6 +137,7 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+static int fcoe_validate_vport_create(struct fc_vport *);
static struct libfc_function_template fcoe_libfc_fcn_templ = {
.frame_send = fcoe_xmit,
@@ -2351,6 +2352,17 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
struct fcoe_interface *fcoe = port->priv;
struct net_device *netdev = fcoe->netdev;
struct fc_lport *vn_port;
+ int rc;
+ char buf[32];
+
+ rc = fcoe_validate_vport_create(vport);
+ if (rc) {
+ wwn_to_str(vport->port_name, buf, sizeof(buf));
+ printk(KERN_ERR "fcoe: Failed to create vport, "
+ "WWPN (0x%s) already exists\n",
+ buf);
+ return rc;
+ }
mutex_lock(&fcoe_config_mutex);
vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
@@ -2497,3 +2509,49 @@ static void fcoe_set_port_id(struct fc_lport *lport,
if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
}
+
+/**
+ * fcoe_validate_vport_create() - Validate a vport before creating it
+ * @vport: NPIV port to be created
+ *
+ * This routine is meant to add validation for a vport before creating it
+ * via fcoe_vport_create().
+ * Current validations are:
+ * - WWPN supplied is unique for given lport
+ *
+ *
+*/
+static int fcoe_validate_vport_create(struct fc_vport *vport)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fc_lport *vn_port;
+ int rc = 0;
+ char buf[32];
+
+ mutex_lock(&n_port->lp_mutex);
+
+ wwn_to_str(vport->port_name, buf, sizeof(buf));
+ /* Check if the wwpn is not same as that of the lport */
+ if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
+ FCOE_DBG("vport WWPN 0x%s is same as that of the "
+ "base port WWPN\n", buf);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Check if there is any existing vport with same wwpn */
+ list_for_each_entry(vn_port, &n_port->vports, list) {
+ if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
+ FCOE_DBG("vport with given WWPN 0x%s already "
+ "exists\n", buf);
+ rc = -EINVAL;
+ break;
+ }
+ }
+
+out:
+ mutex_unlock(&n_port->lp_mutex);
+
+ return rc;
+}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 408a6fd78fb4..c4a93993c0cf 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -99,4 +99,14 @@ static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
}
+static inline void wwn_to_str(u64 wwn, char *buf, int len)
+{
+ u8 wwpn[8];
+
+ u64_to_wwn(wwn, wwpn);
+ snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
+ wwpn[0], wwpn[1], wwpn[2], wwpn[3],
+ wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
+}
+
#endif /* _FCOE_H_ */
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 229e4af5508a..c74c4b8e71ef 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1173,7 +1173,9 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
struct fc_lport *lport = fip->lp;
struct fc_lport *vn_port = NULL;
u32 desc_mask;
- int is_vn_port = 0;
+ int num_vlink_desc;
+ int reset_phys_port = 0;
+ struct fip_vn_desc **vlink_desc_arr = NULL;
LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
@@ -1183,70 +1185,73 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
/*
* mask of required descriptors. Validating each one clears its bit.
*/
- desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_VN_ID);
+ desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
rlen = ntohs(fh->fip_dl_len) * FIP_BPW;
desc = (struct fip_desc *)(fh + 1);
+
+ /*
+ * Actually need to subtract 'sizeof(*mp) - sizeof(*wp)' from 'rlen'
+ * before determining max Vx_Port descriptor but a buggy FCF could have
+ * omited either or both MAC Address and Name Identifier descriptors
+ */
+ num_vlink_desc = rlen / sizeof(*vp);
+ if (num_vlink_desc)
+ vlink_desc_arr = kmalloc(sizeof(vp) * num_vlink_desc,
+ GFP_ATOMIC);
+ if (!vlink_desc_arr)
+ return;
+ num_vlink_desc = 0;
+
while (rlen >= sizeof(*desc)) {
dlen = desc->fip_dlen * FIP_BPW;
if (dlen > rlen)
- return;
+ goto err;
/* Drop CVL if there are duplicate critical descriptors */
if ((desc->fip_dtype < 32) &&
+ (desc->fip_dtype != FIP_DT_VN_ID) &&
!(desc_mask & 1U << desc->fip_dtype)) {
LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
"Descriptors in FIP CVL\n");
- return;
+ goto err;
}
switch (desc->fip_dtype) {
case FIP_DT_MAC:
mp = (struct fip_mac_desc *)desc;
if (dlen < sizeof(*mp))
- return;
+ goto err;
if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac))
- return;
+ goto err;
desc_mask &= ~BIT(FIP_DT_MAC);
break;
case FIP_DT_NAME:
wp = (struct fip_wwn_desc *)desc;
if (dlen < sizeof(*wp))
- return;
+ goto err;
if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name)
- return;
+ goto err;
desc_mask &= ~BIT(FIP_DT_NAME);
break;
case FIP_DT_VN_ID:
vp = (struct fip_vn_desc *)desc;
if (dlen < sizeof(*vp))
- return;
- if (compare_ether_addr(vp->fd_mac,
- fip->get_src_addr(lport)) == 0 &&
- get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn &&
- ntoh24(vp->fd_fc_id) == lport->port_id) {
- desc_mask &= ~BIT(FIP_DT_VN_ID);
- break;
+ goto err;
+ vlink_desc_arr[num_vlink_desc++] = vp;
+ vn_port = fc_vport_id_lookup(lport,
+ ntoh24(vp->fd_fc_id));
+ if (vn_port && (vn_port == lport)) {
+ mutex_lock(&fip->ctlr_mutex);
+ per_cpu_ptr(lport->dev_stats,
+ get_cpu())->VLinkFailureCount++;
+ put_cpu();
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
}
- /* check if clr_vlink is for NPIV port */
- mutex_lock(&lport->lp_mutex);
- list_for_each_entry(vn_port, &lport->vports, list) {
- if (compare_ether_addr(vp->fd_mac,
- fip->get_src_addr(vn_port)) == 0 &&
- (get_unaligned_be64(&vp->fd_wwpn)
- == vn_port->wwpn) &&
- (ntoh24(vp->fd_fc_id) ==
- fc_host_port_id(vn_port->host))) {
- desc_mask &= ~BIT(FIP_DT_VN_ID);
- is_vn_port = 1;
- break;
- }
- }
- mutex_unlock(&lport->lp_mutex);
-
break;
default:
/* standard says ignore unknown descriptors >= 128 */
if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
- return;
+ goto err;
break;
}
desc = (struct fip_desc *)((char *)desc + dlen);
@@ -1256,26 +1261,68 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
/*
* reset only if all required descriptors were present and valid.
*/
- if (desc_mask) {
+ if (desc_mask)
LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n",
desc_mask);
+ else if (!num_vlink_desc) {
+ LIBFCOE_FIP_DBG(fip, "CVL: no Vx_Port descriptor found\n");
+ /*
+ * No Vx_Port description. Clear all NPIV ports,
+ * followed by physical port
+ */
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vn_port, &lport->vports, list)
+ fc_lport_reset(vn_port);
+ mutex_unlock(&lport->lp_mutex);
+
+ mutex_lock(&fip->ctlr_mutex);
+ per_cpu_ptr(lport->dev_stats,
+ get_cpu())->VLinkFailureCount++;
+ put_cpu();
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
+
+ fc_lport_reset(fip->lp);
+ fcoe_ctlr_solicit(fip, NULL);
} else {
- LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
+ int i;
- if (is_vn_port)
- fc_lport_reset(vn_port);
- else {
- mutex_lock(&fip->ctlr_mutex);
- per_cpu_ptr(lport->dev_stats,
- get_cpu())->VLinkFailureCount++;
- put_cpu();
- fcoe_ctlr_reset(fip);
- mutex_unlock(&fip->ctlr_mutex);
+ LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
+ for (i = 0; i < num_vlink_desc; i++) {
+ vp = vlink_desc_arr[i];
+ vn_port = fc_vport_id_lookup(lport,
+ ntoh24(vp->fd_fc_id));
+ if (!vn_port)
+ continue;
+
+ /*
+ * 'port_id' is already validated, check MAC address and
+ * wwpn
+ */
+ if (compare_ether_addr(fip->get_src_addr(vn_port),
+ vp->fd_mac) != 0 ||
+ get_unaligned_be64(&vp->fd_wwpn) !=
+ vn_port->wwpn)
+ continue;
+
+ if (vn_port == lport)
+ /*
+ * Physical port, defer processing till all
+ * listed NPIV ports are cleared
+ */
+ reset_phys_port = 1;
+ else /* NPIV port */
+ fc_lport_reset(vn_port);
+ }
+ if (reset_phys_port) {
fc_lport_reset(fip->lp);
fcoe_ctlr_solicit(fip, NULL);
}
}
+
+err:
+ kfree(vlink_desc_arr);
}
/**
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index f81f77c8569e..41068e8748e7 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -544,16 +544,6 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
struct fcoe_transport *ft = NULL;
enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
-#ifdef CONFIG_LIBFCOE_MODULE
- /*
- * Make sure the module has been initialized, and is not about to be
- * removed. Module parameter sysfs files are writable before the
- * module_init function is called and after module_exit.
- */
- if (THIS_MODULE->state != MODULE_STATE_LIVE)
- goto out_nodev;
-#endif
-
mutex_lock(&ft_mutex);
netdev = fcoe_if_to_netdev(buffer);
@@ -618,16 +608,6 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
struct net_device *netdev = NULL;
struct fcoe_transport *ft = NULL;
-#ifdef CONFIG_LIBFCOE_MODULE
- /*
- * Make sure the module has been initialized, and is not about to be
- * removed. Module parameter sysfs files are writable before the
- * module_init function is called and after module_exit.
- */
- if (THIS_MODULE->state != MODULE_STATE_LIVE)
- goto out_nodev;
-#endif
-
mutex_lock(&ft_mutex);
netdev = fcoe_if_to_netdev(buffer);
@@ -672,16 +652,6 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
struct net_device *netdev = NULL;
struct fcoe_transport *ft = NULL;
-#ifdef CONFIG_LIBFCOE_MODULE
- /*
- * Make sure the module has been initialized, and is not about to be
- * removed. Module parameter sysfs files are writable before the
- * module_init function is called and after module_exit.
- */
- if (THIS_MODULE->state != MODULE_STATE_LIVE)
- goto out_nodev;
-#endif
-
mutex_lock(&ft_mutex);
netdev = fcoe_if_to_netdev(buffer);
@@ -720,16 +690,6 @@ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
struct net_device *netdev = NULL;
struct fcoe_transport *ft = NULL;
-#ifdef CONFIG_LIBFCOE_MODULE
- /*
- * Make sure the module has been initialized, and is not about to be
- * removed. Module parameter sysfs files are writable before the
- * module_init function is called and after module_exit.
- */
- if (THIS_MODULE->state != MODULE_STATE_LIVE)
- goto out_nodev;
-#endif
-
mutex_lock(&ft_mutex);
netdev = fcoe_if_to_netdev(buffer);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 12868ca46110..888086c4e709 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5149,21 +5149,21 @@ static irqreturn_t ipr_isr(int irq, void *devp)
if (ipr_cmd != NULL) {
/* Clear the PCI interrupt */
+ num_hrrq = 0;
do {
writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
- if (int_reg & IPR_PCII_HRRQ_UPDATED) {
- ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return IRQ_HANDLED;
- }
-
} else if (rc == IRQ_NONE && irq_none == 0) {
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
irq_none++;
+ } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
+ int_reg & IPR_PCII_HRRQ_UPDATED) {
+ ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return IRQ_HANDLED;
} else
break;
}
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 911b2736cafa..b9cb8140b398 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -205,6 +205,7 @@ static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
default:
FC_DISC_DBG(disc, "Received an unsupported request, "
"the opcode is (%x)\n", op);
+ fc_frame_free(fp);
break;
}
}
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 77035a746f60..3b8a6451ea28 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1434,6 +1434,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
(f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
(FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
spin_lock_bh(&ep->ex_lock);
+ resp = ep->resp;
rc = fc_exch_done_locked(ep);
WARN_ON(fc_seq_exch(sp) != ep);
spin_unlock_bh(&ep->ex_lock);
@@ -1978,6 +1979,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
spin_unlock_bh(&ep->ex_lock);
return sp;
err:
+ fc_fcp_ddp_done(fr_fsp(fp));
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 2a3a4720a771..9cd2149519ac 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -312,7 +312,7 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
* DDP related resources for a fcp_pkt
* @fsp: The FCP packet that DDP had been used on
*/
-static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
+void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
{
struct fc_lport *lport;
@@ -681,8 +681,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
error = lport->tt.seq_send(lport, seq, fp);
if (error) {
WARN_ON(1); /* send error should be rare */
- fc_fcp_retry_cmd(fsp);
- return 0;
+ return error;
}
fp = NULL;
}
@@ -1673,7 +1672,8 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
FC_FCTL_REQ, 0);
rec_tov = get_fsp_rec_tov(fsp);
- seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
+ seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp,
+ fc_fcp_pkt_destroy,
fsp, jiffies_to_msecs(rec_tov));
if (!seq)
goto retry;
@@ -1720,7 +1720,6 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
return;
}
- fsp->recov_seq = NULL;
switch (fc_frame_payload_op(fp)) {
case ELS_LS_ACC:
fsp->recov_retry = 0;
@@ -1732,10 +1731,9 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
break;
}
fc_fcp_unlock_pkt(fsp);
- fsp->lp->tt.exch_done(seq);
out:
+ fsp->lp->tt.exch_done(seq);
fc_frame_free(fp);
- fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
}
/**
@@ -1747,8 +1745,6 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
if (fc_fcp_lock_pkt(fsp))
goto out;
- fsp->lp->tt.exch_done(fsp->recov_seq);
- fsp->recov_seq = NULL;
switch (PTR_ERR(fp)) {
case -FC_EX_TIMEOUT:
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
@@ -1764,7 +1760,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
}
fc_fcp_unlock_pkt(fsp);
out:
- fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
+ fsp->lp->tt.exch_done(fsp->recov_seq);
}
/**
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index fedc819d70c0..c7d071289af5 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -108,6 +108,7 @@ extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */
* Set up direct-data placement for this I/O request
*/
void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
+void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp);
/*
* Module setup functions
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 31fc21f4d831..db9238f2ecb8 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -99,19 +99,29 @@ static void sas_ata_task_done(struct sas_task *task)
struct sas_ha_struct *sas_ha;
enum ata_completion_errors ac;
unsigned long flags;
+ struct ata_link *link;
if (!qc)
goto qc_already_gone;
dev = qc->ap->private_data;
sas_ha = dev->port->ha;
+ link = &dev->sata_dev.ap->link;
spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
((stat->stat == SAM_STAT_CHECK_CONDITION &&
dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
- qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+
+ if (!link->sactive) {
+ qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+ } else {
+ link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+ if (unlikely(link->eh_info.err_mask))
+ qc->flags |= ATA_QCFLAG_FAILED;
+ }
+
dev->sata_dev.sstatus = resp->sstatus;
dev->sata_dev.serror = resp->serror;
dev->sata_dev.scontrol = resp->scontrol;
@@ -121,7 +131,13 @@ static void sas_ata_task_done(struct sas_task *task)
SAS_DPRINTK("%s: SAS error %x\n", __func__,
stat->stat);
/* We saw a SAS error. Send a vague error. */
- qc->err_mask = ac;
+ if (!link->sactive) {
+ qc->err_mask = ac;
+ } else {
+ link->eh_info.err_mask |= AC_ERR_DEV;
+ qc->flags |= ATA_QCFLAG_FAILED;
+ }
+
dev->sata_dev.tf.feature = 0x04; /* status err */
dev->sata_dev.tf.command = ATA_ERR;
}
@@ -279,6 +295,44 @@ static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
return ret;
}
+static int sas_ata_soft_reset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct domain_device *dev = ap->private_data;
+ struct sas_internal *i =
+ to_sas_internal(dev->port->ha->core.shost->transportt);
+ int res = TMF_RESP_FUNC_FAILED;
+ int ret = 0;
+
+ if (i->dft->lldd_ata_soft_reset)
+ res = i->dft->lldd_ata_soft_reset(dev);
+
+ if (res != TMF_RESP_FUNC_COMPLETE) {
+ SAS_DPRINTK("%s: Unable to soft reset\n", __func__);
+ ret = -EAGAIN;
+ }
+
+ switch (dev->sata_dev.command_set) {
+ case ATA_COMMAND_SET:
+ SAS_DPRINTK("%s: Found ATA device.\n", __func__);
+ *class = ATA_DEV_ATA;
+ break;
+ case ATAPI_COMMAND_SET:
+ SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
+ *class = ATA_DEV_ATAPI;
+ break;
+ default:
+ SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
+ __func__, dev->sata_dev.command_set);
+ *class = ATA_DEV_UNKNOWN;
+ break;
+ }
+
+ ap->cbl = ATA_CBL_SATA;
+ return ret;
+}
+
static void sas_ata_post_internal(struct ata_queued_cmd *qc)
{
if (qc->flags & ATA_QCFLAG_FAILED)
@@ -309,7 +363,7 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
static struct ata_port_operations sas_sata_ops = {
.prereset = ata_std_prereset,
- .softreset = NULL,
+ .softreset = sas_ata_soft_reset,
.hardreset = sas_ata_hard_reset,
.postreset = ata_std_postreset,
.error_handler = ata_std_error_handler,
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 8b538bd1ff2b..14e21b5fb8ba 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -57,7 +57,7 @@ int sas_init_queue(struct sas_ha_struct *sas_ha);
int sas_init_events(struct sas_ha_struct *sas_ha);
void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
-void sas_deform_port(struct asd_sas_phy *phy);
+void sas_deform_port(struct asd_sas_phy *phy, int gone);
void sas_porte_bytes_dmaed(struct work_struct *work);
void sas_porte_broadcast_rcvd(struct work_struct *work);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index b459c4b635b1..e0f5018e9071 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -39,7 +39,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
&phy->phy_events_pending);
phy->error = 0;
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
}
static void sas_phye_oob_done(struct work_struct *work)
@@ -66,7 +66,7 @@ static void sas_phye_oob_error(struct work_struct *work)
sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock,
&phy->phy_events_pending);
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
if (!port && phy->enabled && i->dft->lldd_control_phy) {
phy->error++;
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 5257fdfe699a..42fd1f25b664 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -57,7 +57,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
if (port) {
if (!phy_is_wideport_member(port, phy))
- sas_deform_port(phy);
+ sas_deform_port(phy, 0);
else {
SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
__func__, phy->id, phy->port->id,
@@ -153,28 +153,31 @@ static void sas_form_port(struct asd_sas_phy *phy)
* This is called when the physical link to the other phy has been
* lost (on this phy), in Event thread context. We cannot delay here.
*/
-void sas_deform_port(struct asd_sas_phy *phy)
+void sas_deform_port(struct asd_sas_phy *phy, int gone)
{
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
struct sas_internal *si =
to_sas_internal(sas_ha->core.shost->transportt);
+ struct domain_device *dev;
unsigned long flags;
if (!port)
return; /* done by a phy event */
- if (port->port_dev)
- port->port_dev->pathways--;
+ dev = port->port_dev;
+ if (dev)
+ dev->pathways--;
if (port->num_phys == 1) {
+ if (dev && gone)
+ dev->gone = 1;
sas_unregister_domain_devices(port);
sas_port_delete(port->port);
port->port = NULL;
} else
sas_port_delete_phy(port->port, phy->phy);
-
if (si->dft->lldd_port_deformed)
si->dft->lldd_port_deformed(phy);
@@ -244,7 +247,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
&phy->port_events_pending);
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
}
void sas_porte_timer_event(struct work_struct *work)
@@ -256,7 +259,7 @@ void sas_porte_timer_event(struct work_struct *work)
sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
&phy->port_events_pending);
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
}
void sas_porte_hard_reset(struct work_struct *work)
@@ -268,7 +271,7 @@ void sas_porte_hard_reset(struct work_struct *work)
sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
&phy->port_events_pending);
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
}
/* ---------- SAS port registration ---------- */
@@ -306,6 +309,6 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
for (i = 0; i < sas_ha->num_phys; i++)
if (sas_ha->sas_phy[i]->port)
- sas_deform_port(sas_ha->sas_phy[i]);
+ sas_deform_port(sas_ha->sas_phy[i], 0);
}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index f6e189f40917..eeba76cdf774 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -207,6 +207,13 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
struct sas_ha_struct *sas_ha = dev->port->ha;
struct sas_task *task;
+ /* If the device fell off, no sense in issuing commands */
+ if (dev->gone) {
+ cmd->result = DID_BAD_TARGET << 16;
+ scsi_done(cmd);
+ goto out;
+ }
+
if (dev_is_sata(dev)) {
unsigned long flags;
@@ -216,13 +223,6 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
goto out;
}
- /* If the device fell off, no sense in issuing commands */
- if (dev->gone) {
- cmd->result = DID_BAD_TARGET << 16;
- scsi_done(cmd);
- goto out;
- }
-
res = -ENOMEM;
task = sas_create_task(cmd, dev, GFP_ATOMIC);
if (!task)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 02d53d89534f..8ec2c86a49d4 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -41,6 +41,7 @@ struct lpfc_sli2_slim;
downloads using bsg */
#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
+#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
@@ -486,6 +487,42 @@ struct unsol_rcv_ct_ctx {
(1 << LPFC_USER_LINK_SPEED_AUTO))
#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
+enum nemb_type {
+ nemb_mse = 1,
+ nemb_hbd
+};
+
+enum mbox_type {
+ mbox_rd = 1,
+ mbox_wr
+};
+
+enum dma_type {
+ dma_mbox = 1,
+ dma_ebuf
+};
+
+enum sta_type {
+ sta_pre_addr = 1,
+ sta_pos_addr
+};
+
+struct lpfc_mbox_ext_buf_ctx {
+ uint32_t state;
+#define LPFC_BSG_MBOX_IDLE 0
+#define LPFC_BSG_MBOX_HOST 1
+#define LPFC_BSG_MBOX_PORT 2
+#define LPFC_BSG_MBOX_DONE 3
+#define LPFC_BSG_MBOX_ABTS 4
+ enum nemb_type nembType;
+ enum mbox_type mboxType;
+ uint32_t numBuf;
+ uint32_t mbxTag;
+ uint32_t seqNum;
+ struct lpfc_dmabuf *mbx_dmabuf;
+ struct list_head ext_dmabuf_list;
+};
+
struct lpfc_hba {
/* SCSI interface function jump table entries */
int (*lpfc_new_scsi_buf)
@@ -589,6 +626,7 @@ struct lpfc_hba {
MAILBOX_t *mbox;
uint32_t *mbox_ext;
+ struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx;
uint32_t ha_copy;
struct _PCB *pcb;
struct _IOCB *IOCBs;
@@ -659,6 +697,7 @@ struct lpfc_hba {
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
+ uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
@@ -706,7 +745,6 @@ struct lpfc_hba {
uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
int brd_no; /* FC board number */
-
char SerialNumber[32]; /* adapter Serial Number */
char OptionROMVersion[32]; /* adapter BIOS / Fcode version */
char ModelDesc[256]; /* Model Description */
@@ -778,6 +816,9 @@ struct lpfc_hba {
uint16_t vpi_base;
uint16_t vfi_base;
unsigned long *vpi_bmask; /* vpi allocation table */
+ uint16_t *vpi_ids;
+ uint16_t vpi_count;
+ struct list_head lpfc_vpi_blk_list;
/* Data structure used by fabric iocb scheduler */
struct list_head fabric_iocb_list;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 8dcbf8fff673..135a53baa735 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -755,6 +755,73 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * Request SLI4 interface type-2 device to perform a physical register set
+ * access.
+ *
+ * Returns:
+ * zero for success
+ **/
+static ssize_t
+lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
+{
+ struct completion online_compl;
+ uint32_t reg_val;
+ int status = 0;
+ int rc;
+
+ if (!phba->cfg_enable_hba_reset)
+ return -EIO;
+
+ if ((phba->sli_rev < LPFC_SLI_REV4) ||
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2))
+ return -EPERM;
+
+ status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+
+ if (status != 0)
+ return status;
+
+ /* wait for the device to be quiesced before firmware reset */
+ msleep(100);
+
+ reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET);
+
+ if (opcode == LPFC_FW_DUMP)
+ reg_val |= LPFC_FW_DUMP_REQUEST;
+ else if (opcode == LPFC_FW_RESET)
+ reg_val |= LPFC_CTL_PDEV_CTL_FRST;
+ else if (opcode == LPFC_DV_RESET)
+ reg_val |= LPFC_CTL_PDEV_CTL_DRST;
+
+ writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET);
+ /* flush */
+ readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+ /* delay driver action following IF_TYPE_2 reset */
+ msleep(100);
+
+ init_completion(&online_compl);
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
+
+ wait_for_completion(&online_compl);
+
+ if (status != 0)
+ return -EIO;
+
+ return 0;
+}
+
+/**
* lpfc_nport_evt_cnt_show - Return the number of nport events
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
@@ -848,6 +915,12 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
else
status = lpfc_do_offline(phba, LPFC_EVT_KILL);
+ else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
+ else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
+ else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
else
return -EINVAL;
@@ -1322,6 +1395,102 @@ lpfc_dss_show(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted support level.
+ *
+ * Description:
+ * Returns the maximum number of virtual functions a physical function can
+ * support, 0 will be returned if called on virtual function.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sriov_hw_max_virtfn_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct pci_dev *pdev = phba->pcidev;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
+ struct lpfc_rsrc_desc_pcie *desc;
+ uint32_t max_nr_virtfn;
+ uint32_t desc_count;
+ int length, rc, i;
+
+ if ((phba->sli_rev < LPFC_SLI_REV4) ||
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2))
+ return -EPERM;
+
+ if (!pdev->is_physfn)
+ return snprintf(buf, PAGE_SIZE, "%d\n", 0);
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ /* get the maximum number of virtfn support by physfn */
+ length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
+ length, LPFC_SLI4_MBX_EMBED);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
+ phba->sli4_hba.iov.pf_number + 1);
+
+ get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
+ bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
+ LPFC_CFG_TYPE_CURRENT_ACTIVE);
+
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
+ lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
+
+ if (rc != MBX_TIMEOUT) {
+ /* check return status */
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (shdr_status || shdr_add_status || rc)
+ goto error_out;
+
+ } else
+ goto error_out;
+
+ desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
+
+ for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+ desc = (struct lpfc_rsrc_desc_pcie *)
+ &get_prof_cfg->u.response.prof_cfg.desc[i];
+ if (LPFC_RSRC_DESC_TYPE_PCIE ==
+ bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+ max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
+ desc);
+ break;
+ }
+ }
+
+ if (i < LPFC_RSRC_DESC_MAX_NUM) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
+ }
+
+error_out:
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+}
+
+/**
* lpfc_param_show - Return a cfg attribute value in decimal
*
* Description:
@@ -1762,6 +1931,8 @@ static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
+static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
+ lpfc_sriov_hw_max_virtfn_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -3014,7 +3185,7 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
*
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
- * @buf: containing the string "selective".
+ * @buf: containing enable or disable aer flag.
* @count: unused variable.
*
* Description:
@@ -3098,7 +3269,7 @@ lpfc_param_show(aer_support)
/**
* lpfc_aer_support_init - Set the initial adapters aer support flag
* @phba: lpfc_hba pointer.
- * @val: link speed value.
+ * @val: enable aer or disable aer flag.
*
* Description:
* If val is in a valid range [0,1], then set the adapter's initial
@@ -3137,7 +3308,7 @@ static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
* lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
- * @buf: containing the string "selective".
+ * @buf: containing flag 1 for aer cleanup state.
* @count: unused variable.
*
* Description:
@@ -3180,6 +3351,136 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
lpfc_aer_cleanup_state);
+/**
+ * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string the number of vfs to be enabled.
+ * @count: unused variable.
+ *
+ * Description:
+ * When this api is called either through user sysfs, the driver shall
+ * try to enable or disable SR-IOV virtual functions according to the
+ * following:
+ *
+ * If zero virtual function has been enabled to the physical function,
+ * the driver shall invoke the pci enable virtual function api trying
+ * to enable the virtual functions. If the nr_vfn provided is greater
+ * than the maximum supported, the maximum virtual function number will
+ * be used for invoking the api; otherwise, the nr_vfn provided shall
+ * be used for invoking the api. If the api call returned success, the
+ * actual number of virtual functions enabled will be set to the driver
+ * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
+ * cfg_sriov_nr_virtfn remains zero.
+ *
+ * If none-zero virtual functions have already been enabled to the
+ * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
+ * -EINVAL will be returned and the driver does nothing;
+ *
+ * If the nr_vfn provided is zero and none-zero virtual functions have
+ * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
+ * disabling virtual function api shall be invoded to disable all the
+ * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
+ * zero. Otherwise, if zero virtual function has been enabled, do
+ * nothing.
+ *
+ * Returns:
+ * length of the buf on success if val is in range the intended mode
+ * is supported.
+ * -EINVAL if val out of range or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct pci_dev *pdev = phba->pcidev;
+ int val = 0, rc = -EINVAL;
+
+ /* Sanity check on user data */
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+ if (val < 0)
+ return -EINVAL;
+
+ /* Request disabling virtual functions */
+ if (val == 0) {
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ return strlen(buf);
+ }
+
+ /* Request enabling virtual functions */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3018 There are %d virtual functions "
+ "enabled on physical function.\n",
+ phba->cfg_sriov_nr_virtfn);
+ return -EEXIST;
+ }
+
+ if (val <= LPFC_MAX_VFN_PER_PFN)
+ phba->cfg_sriov_nr_virtfn = val;
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3019 Enabling %d virtual functions is not "
+ "allowed.\n", val);
+ return -EINVAL;
+ }
+
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ phba->cfg_sriov_nr_virtfn = 0;
+ rc = -EPERM;
+ } else
+ rc = strlen(buf);
+
+ return rc;
+}
+
+static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
+module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
+lpfc_param_show(sriov_nr_virtfn)
+
+/**
+ * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
+ * @phba: lpfc_hba pointer.
+ * @val: link speed value.
+ *
+ * Description:
+ * If val is in a valid range [0,255], then set the adapter's initial
+ * cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum
+ * number shall be used instead. It will be up to the driver's probe_one
+ * routine to determine whether the device's SR-IOV is supported or not.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
+{
+ if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
+ phba->cfg_sriov_nr_virtfn = val;
+ return 0;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3017 Enabling %d virtual functions is not "
+ "allowed.\n", val);
+ return -EINVAL;
+}
+static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
+ lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
+
/*
# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
# Value range is [2,3]. Default value is 3.
@@ -3497,6 +3798,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_prot_sg_seg_cnt,
&dev_attr_lpfc_aer_support,
&dev_attr_lpfc_aer_state_cleanup,
+ &dev_attr_lpfc_sriov_nr_virtfn,
&dev_attr_lpfc_suppress_link_up,
&dev_attr_lpfc_iocb_cnt,
&dev_attr_iocb_hw,
@@ -3505,6 +3807,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_fips_level,
&dev_attr_lpfc_fips_rev,
&dev_attr_lpfc_dss,
+ &dev_attr_lpfc_sriov_hw_max_virtfn,
NULL,
};
@@ -3961,7 +4264,7 @@ static struct bin_attribute sysfs_mbox_attr = {
.name = "mbox",
.mode = S_IRUSR | S_IWUSR,
},
- .size = MAILBOX_CMD_SIZE,
+ .size = MAILBOX_SYSFS_MAX,
.read = sysfs_mbox_read,
.write = sysfs_mbox_write,
};
@@ -4705,6 +5008,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
+ lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
phba->cfg_enable_dss = 1;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 853e5042f39c..7fb0ba4cbfa7 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -23,6 +23,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/list.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -79,8 +80,7 @@ struct lpfc_bsg_iocb {
struct lpfc_bsg_mbox {
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *mb;
- struct lpfc_dmabuf *rxbmp; /* for BIU diags */
- struct lpfc_dmabufext *dmp; /* for BIU diags */
+ struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
uint8_t *ext; /* extended mailbox data */
uint32_t mbOffset; /* from app */
uint32_t inExtWLen; /* from app */
@@ -332,6 +332,8 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
cmd->ulpLe = 1;
cmd->ulpClass = CLASS3;
cmd->ulpContext = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
cmd->ulpOwner = OWN_CHIP;
cmdiocbq->vport = phba->pport;
cmdiocbq->context3 = bmp;
@@ -1336,6 +1338,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
}
icmd->un.ulpWord[3] = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ icmd->ulpContext =
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
/* The exchange is done, mark the entry as invalid */
phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
} else
@@ -1463,11 +1469,91 @@ send_mgmt_rsp_exit:
}
/**
- * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
+ * lpfc_bsg_diag_mode_enter - prepare the driver for device diag loopback mode
+ * @phba: Pointer to HBA context object.
* @job: LPFC_BSG_VENDOR_DIAG_MODE
*
- * This function is responsible for placing a port into diagnostic loopback
- * mode in order to perform a diagnostic loopback test.
+ * This function is responsible for preparing the driver for diag loopback
+ * on the device.
+ */
+static int
+lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ int i = 0;
+
+ psli = &phba->sli;
+ if (!psli)
+ return -ENODEV;
+
+ pring = &psli->ring[LPFC_FCP_RING];
+ if (!pring)
+ return -ENODEV;
+
+ if ((phba->link_state == LPFC_HBA_ERROR) ||
+ (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+ return -EACCES;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_block_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ } else {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_block_requests(shost);
+ }
+
+ while (pring->txcmplq_cnt) {
+ if (i++ > 500) /* wait up to 5 seconds */
+ break;
+ msleep(10);
+ }
+ return 0;
+}
+
+/**
+ * lpfc_bsg_diag_mode_exit - exit processing for device diag loopback mode
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for the driver's exit processing of device
+ * diag loopback mode; it unblocks SCSI requests on all vports.
+ */
+static void
+lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_unblock_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ } else {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_unblock_requests(shost);
+ }
+ return;
+}
+
+/**
+ * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli3 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
* All new scsi requests are blocked, a small delay is used to allow the
* scsi requests to complete then the link is brought down. If the link is
* is placed in loopback mode then scsi requests are again allowed
@@ -1475,17 +1561,11 @@ send_mgmt_rsp_exit:
* All of this is done in-line.
*/
static int
-lpfc_bsg_diag_mode(struct fc_bsg_job *job)
+lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
- struct Scsi_Host *shost = job->shost;
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
struct diag_mode_set *loopback_mode;
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
uint32_t link_flags;
uint32_t timeout;
- struct lpfc_vport **vports;
LPFC_MBOXQ_t *pmboxq;
int mbxstatus;
int i = 0;
@@ -1494,53 +1574,33 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
/* no data to return just the return code */
job->reply->reply_payload_rcv_len = 0;
- if (job->request_len <
- sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2738 Received DIAG MODE request below minimum "
- "size\n");
+ "2738 Received DIAG MODE request size:%d "
+ "below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)));
rc = -EINVAL;
goto job_error;
}
+ rc = lpfc_bsg_diag_mode_enter(phba, job);
+ if (rc)
+ goto job_error;
+
+ /* bring the link to diagnostic mode */
loopback_mode = (struct diag_mode_set *)
job->request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
- if ((phba->link_state == LPFC_HBA_ERROR) ||
- (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
- (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
- rc = -EACCES;
- goto job_error;
- }
-
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
rc = -ENOMEM;
- goto job_error;
- }
-
- vports = lpfc_create_vport_work_array(phba);
- if (vports) {
- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- scsi_block_requests(shost);
- }
-
- lpfc_destroy_vport_work_array(phba, vports);
- } else {
- shost = lpfc_shost_from_vport(phba->pport);
- scsi_block_requests(shost);
+ goto loopback_mode_exit;
}
-
- while (pring->txcmplq_cnt) {
- if (i++ > 500) /* wait up to 5 seconds */
- break;
-
- msleep(10);
- }
-
memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
@@ -1594,22 +1654,186 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
rc = -ENODEV;
loopback_mode_exit:
- vports = lpfc_create_vport_work_array(phba);
- if (vports) {
- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- scsi_unblock_requests(shost);
+ lpfc_bsg_diag_mode_exit(phba);
+
+ /*
+ * Let SLI layer release mboxq if mbox command completed after timeout.
+ */
+ if (mbxstatus != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+job_error:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
+ * @phba: Pointer to HBA context object.
+ * @diag: Flag to set the link to diag or normal operation state.
+ *
+ * This function is responsible for issuing an sli4 mailbox command to set the
+ * link to either diag state or normal operation state.
+ */
+static int
+lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
+{
+ LPFC_MBOXQ_t *pmboxq;
+ struct lpfc_mbx_set_link_diag_state *link_diag_state;
+ uint32_t req_len, alloc_len;
+ int mbxstatus = MBX_SUCCESS, rc;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return -ENOMEM;
+
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto link_diag_state_set_out;
+ }
+ link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
+ bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
+ phba->sli4_hba.link_state.number);
+ bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
+ phba->sli4_hba.link_state.type);
+ if (diag)
+ bf_set(lpfc_mbx_set_diag_state_diag,
+ &link_diag_state->u.req, 1);
+ else
+ bf_set(lpfc_mbx_set_diag_state_diag,
+ &link_diag_state->u.req, 0);
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+
+ if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
+ rc = 0;
+ else
+ rc = -ENODEV;
+
+link_diag_state_set_out:
+ if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli4 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
+ */
+static int
+lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct diag_mode_set *loopback_mode;
+ uint32_t link_flags, timeout, req_len, alloc_len;
+ struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ int mbxstatus, i, rc = 0;
+
+ /* no data to return just the return code */
+ job->reply->reply_payload_rcv_len = 0;
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3011 Received DIAG MODE request size:%d "
+ "below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)));
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ rc = lpfc_bsg_diag_mode_enter(phba, job);
+ if (rc)
+ goto job_error;
+
+ /* bring the link to diagnostic mode */
+ loopback_mode = (struct diag_mode_set *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ link_flags = loopback_mode->type;
+ timeout = loopback_mode->timeout * 100;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+ if (rc)
+ goto loopback_mode_exit;
+
+ /* wait for link down before proceeding */
+ i = 0;
+ while (phba->link_state != LPFC_LINK_DOWN) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ goto loopback_mode_exit;
+ }
+ msleep(10);
+ }
+ /* set up loopback mode */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto loopback_mode_exit;
+ }
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto loopback_mode_exit;
+ }
+ link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
+ bf_set(lpfc_mbx_set_diag_state_link_num,
+ &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
+ bf_set(lpfc_mbx_set_diag_state_link_type,
+ &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
+ if (link_flags == INTERNAL_LOOP_BACK)
+ bf_set(lpfc_mbx_set_diag_lpbk_type,
+ &link_diag_loopback->u.req,
+ LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
+ else
+ bf_set(lpfc_mbx_set_diag_lpbk_type,
+ &link_diag_loopback->u.req,
+ LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+ if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
+ rc = -ENODEV;
+ else {
+ phba->link_flag |= LS_LOOPBACK_MODE;
+ /* wait for the link attention interrupt */
+ msleep(100);
+ i = 0;
+ while (phba->link_state != LPFC_HBA_READY) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ break;
+ }
+ msleep(10);
}
- lpfc_destroy_vport_work_array(phba, vports);
- } else {
- shost = lpfc_shost_from_vport(phba->pport);
- scsi_unblock_requests(shost);
}
+loopback_mode_exit:
+ lpfc_bsg_diag_mode_exit(phba);
+
/*
* Let SLI layer release mboxq if mbox command completed after timeout.
*/
- if (mbxstatus != MBX_TIMEOUT)
+ if (pmboxq && (mbxstatus != MBX_TIMEOUT))
mempool_free(pmboxq, phba->mbox_mem_pool);
job_error:
@@ -1622,6 +1846,234 @@ job_error:
}
/**
+ * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * command from the user to the proper driver action routine.
+ */
+static int
+lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ int rc;
+
+ shost = job->shost;
+ if (!shost)
+ return -ENODEV;
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport)
+ return -ENODEV;
+ phba = vport->phba;
+ if (!phba)
+ return -ENODEV;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
+ else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2)
+ rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
+ else
+ rc = -ENODEV;
+
+ return rc;
+
+}
+
+/**
+ * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
+ *
+ * This function is responsible for checking and processing the bsg diag
+ * mode end command from the user.
+ */
+static int
+lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ int rc;
+
+ shost = job->shost;
+ if (!shost)
+ return -ENODEV;
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport)
+ return -ENODEV;
+ phba = vport->phba;
+ if (!phba)
+ return -ENODEV;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return -ENODEV;
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -ENODEV;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+ if (!rc)
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
+ * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
+ *
+ * This function performs an SLI4 diag link test request from the user
+ * application.
+ */
+static int
+lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ LPFC_MBOXQ_t *pmboxq;
+ struct sli4_link_diag *link_diag_test_cmd;
+ uint32_t req_len, alloc_len;
+ uint32_t timeout;
+ struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ struct diag_status *diag_status_reply;
+ int mbxstatus, rc = 0;
+
+ shost = job->shost;
+ if (!shost) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ phba = vport->phba;
+ if (!phba) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct sli4_link_diag)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3013 Received LINK DIAG TEST request "
+ " size:%d below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct sli4_link_diag)));
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ rc = lpfc_bsg_diag_mode_enter(phba, job);
+ if (rc)
+ goto job_error;
+
+ link_diag_test_cmd = (struct sli4_link_diag *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ timeout = link_diag_test_cmd->timeout * 100;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+
+ if (rc)
+ goto job_error;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto link_diag_test_exit;
+ }
+
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto link_diag_test_exit;
+ }
+ run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
+ bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
+ phba->sli4_hba.link_state.number);
+ bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
+ phba->sli4_hba.link_state.type);
+ bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
+ link_diag_test_cmd->test_id);
+ bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
+ link_diag_test_cmd->loops);
+ bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
+ link_diag_test_cmd->test_version);
+ bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
+ link_diag_test_cmd->error_action);
+
+ mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || mbxstatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3010 Run link diag test mailbox failed with "
+ "mbx_status x%x status x%x, add_status x%x\n",
+ mbxstatus, shdr_status, shdr_add_status);
+ }
+
+ diag_status_reply = (struct diag_status *)
+ job->reply->reply_data.vendor_reply.vendor_rsp;
+
+ if (job->reply_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3012 Received Run link diag test reply "
+ "below minimum size (%d): reply_len:%d\n",
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_status)),
+ job->reply_len);
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ diag_status_reply->mbox_status = mbxstatus;
+ diag_status_reply->shdr_status = shdr_status;
+ diag_status_reply->shdr_add_status = shdr_add_status;
+
+link_diag_test_exit:
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ lpfc_bsg_diag_mode_exit(phba);
+
+job_error:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
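
As a reading aid for the reply handling above: userspace sees the three status words that lpfc_sli4_bsg_link_diag_test() copies into the vendor reply. The sketch below only mirrors those three fields; the struct name and the check helper are assumptions for illustration, not the driver's own definitions:

#include <stdint.h>
#include <stdio.h>

/* Assumed reply layout, mirroring the fields the handler above fills in. */
struct diag_status_sketch {
	uint32_t mbox_status;
	uint32_t shdr_status;
	uint32_t shdr_add_status;
};

/* Returns 0 when the link diag test reply indicates success. */
static int link_diag_test_check(const struct diag_status_sketch *ds)
{
	if (ds->mbox_status || ds->shdr_status || ds->shdr_add_status) {
		fprintf(stderr, "link diag test failed: mbx x%x stat x%x add x%x\n",
			ds->mbox_status, ds->shdr_status, ds->shdr_add_status);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct diag_status_sketch ok = { 0, 0, 0 };

	return link_diag_test_check(&ok);
}
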
+/**
* lpfcdiag_loop_self_reg - obtains a remote port login id
* @phba: Pointer to HBA context object
* @rpi: Pointer to a remote port login id
@@ -1851,6 +2303,86 @@ err_get_xri_exit:
}
/**
+ * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object
+ *
+ * This function allocates a BSG_MBOX_SIZE (4KB) page-sized dma buffer and
+ * returns the pointer to the buffer.
+ **/
+static struct lpfc_dmabuf *
+lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
+{
+ struct lpfc_dmabuf *dmabuf;
+ struct pci_dev *pcidev = phba->pcidev;
+
+ /* allocate dma buffer struct */
+ dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf)
+ return NULL;
+
+ INIT_LIST_HEAD(&dmabuf->list);
+
+ /* now, allocate dma buffer */
+ dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+ &(dmabuf->phys), GFP_KERNEL);
+
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ return NULL;
+ }
+ memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
+
+ return dmabuf;
+}
+
+/**
+ * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
+ *
+ * This routine frees a dma buffer and its associated buffer
+ * descriptor referred to by @dmabuf.
+ **/
+static void
+lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
+{
+ struct pci_dev *pcidev = phba->pcidev;
+
+ if (!dmabuf)
+ return;
+
+ if (dmabuf->virt)
+ dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ return;
+}
+
+/**
+ * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
+ * @phba: Pointer to HBA context object.
+ * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
+ *
+ * This routine frees all dma buffers and their associated buffer
+ * descriptors referred to by @dmabuf_list.
+ **/
+static void
+lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
+ struct list_head *dmabuf_list)
+{
+ struct lpfc_dmabuf *dmabuf, *next_dmabuf;
+
+ if (list_empty(dmabuf_list))
+ return;
+
+ list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
+ list_del_init(&dmabuf->list);
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ }
+ return;
+}
+
+/**
* diag_cmd_data_alloc - fills in a bde struct with dma buffers
* @phba: Pointer to HBA context object
* @bpl: Pointer to 64 bit bde structure
@@ -2067,7 +2599,7 @@ err_post_rxbufs_exit:
}
/**
- * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
+ * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a CT cmd to itself
* @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
*
* This function receives a user data buffer to be transmitted and received on
@@ -2086,7 +2618,7 @@ err_post_rxbufs_exit:
* of loopback mode.
**/
static int
-lpfc_bsg_diag_test(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
@@ -2411,7 +2943,7 @@ job_error:
}
/**
- * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
+ * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
* @phba: Pointer to HBA context object.
* @pmboxq: Pointer to mailbox command.
*
@@ -2422,15 +2954,13 @@ job_error:
* of the mailbox.
**/
void
-lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
struct fc_bsg_job *job;
- struct lpfc_mbx_nembed_cmd *nembed_sge;
uint32_t size;
unsigned long flags;
- uint8_t *to;
- uint8_t *from;
+ uint8_t *pmb, *pmb_buf;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = pmboxq->context1;
@@ -2440,62 +2970,21 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
return;
}
- /* build the outgoing buffer to do an sg copy
- * the format is the response mailbox followed by any extended
- * mailbox data
+ /*
+ * The outgoing buffer is referenced directly from the dma buffer;
+ * we just need to get the header part from the mailboxq structure.
*/
- from = (uint8_t *)&pmboxq->u.mb;
- to = (uint8_t *)dd_data->context_un.mbox.mb;
- memcpy(to, from, sizeof(MAILBOX_t));
- if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
- /* copy the extended data if any, count is in words */
- if (dd_data->context_un.mbox.outExtWLen) {
- from = (uint8_t *)dd_data->context_un.mbox.ext;
- to += sizeof(MAILBOX_t);
- size = dd_data->context_un.mbox.outExtWLen *
- sizeof(uint32_t);
- memcpy(to, from, size);
- } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
- from = (uint8_t *)dd_data->context_un.mbox.
- dmp->dma.virt;
- to += sizeof(MAILBOX_t);
- size = dd_data->context_un.mbox.dmp->size;
- memcpy(to, from, size);
- } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
- from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
- virt;
- to += sizeof(MAILBOX_t);
- size = pmboxq->u.mb.un.varWords[5];
- memcpy(to, from, size);
- } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
- nembed_sge = (struct lpfc_mbx_nembed_cmd *)
- &pmboxq->u.mb.un.varWords[0];
-
- from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
- virt;
- to += sizeof(MAILBOX_t);
- size = nembed_sge->sge[0].length;
- memcpy(to, from, size);
- } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
- from = (uint8_t *)dd_data->context_un.
- mbox.dmp->dma.virt;
- to += sizeof(MAILBOX_t);
- size = dd_data->context_un.mbox.dmp->size;
- memcpy(to, from, size);
- }
- }
+ pmb = (uint8_t *)&pmboxq->u.mb;
+ pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+ memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
- from = (uint8_t *)dd_data->context_un.mbox.mb;
job = dd_data->context_un.mbox.set_job;
if (job) {
size = job->reply_payload.payload_len;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt,
- from, size);
- job->reply->result = 0;
+ job->reply_payload.sg_cnt,
+ pmb_buf, size);
/* need to hold the lock until we set job->dd_data to NULL
* to hold off the timeout handler returning to the mid-layer
* while we are still processing the job.
@@ -2503,28 +2992,19 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
job->dd_data = NULL;
dd_data->context_un.mbox.set_job = NULL;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- job->job_done(job);
} else {
dd_data->context_un.mbox.set_job = NULL;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
}
- kfree(dd_data->context_un.mbox.mb);
mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
- kfree(dd_data->context_un.mbox.ext);
- if (dd_data->context_un.mbox.dmp) {
- dma_free_coherent(&phba->pcidev->dev,
- dd_data->context_un.mbox.dmp->size,
- dd_data->context_un.mbox.dmp->dma.virt,
- dd_data->context_un.mbox.dmp->dma.phys);
- kfree(dd_data->context_un.mbox.dmp);
- }
- if (dd_data->context_un.mbox.rxbmp) {
- lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
- dd_data->context_un.mbox.rxbmp->phys);
- kfree(dd_data->context_un.mbox.rxbmp);
- }
+ lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
kfree(dd_data);
+
+ if (job) {
+ job->reply->result = 0;
+ job->job_done(job);
+ }
return;
}
@@ -2619,6 +3099,1006 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
}
/**
+ * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine cleans up and resets the BSG handling of a multi-buffer mbox
+ * command session.
+ **/
+static void
+lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
+{
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
+ return;
+
+ /* free all memory, including dma buffers */
+ lpfc_bsg_dma_page_list_free(phba,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
+ /* multi-buffer write mailbox command pass-through complete */
+ memset((char *)&phba->mbox_ext_buf_ctx, 0,
+ sizeof(struct lpfc_mbox_ext_buf_ctx));
+ INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ return;
+}
+
+/**
+ * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This routine handles BSG job completion for mailbox commands with
+ * multiple external buffers.
+ **/
+static struct fc_bsg_job *
+lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct bsg_job_data *dd_data;
+ struct fc_bsg_job *job;
+ uint8_t *pmb, *pmb_buf;
+ unsigned long flags;
+ uint32_t size;
+ int rc = 0;
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ dd_data = pmboxq->context1;
+ /* has the job already timed out? */
+ if (!dd_data) {
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ job = NULL;
+ goto job_done_out;
+ }
+
+ /*
+ * The outgoing buffer is referenced directly from the dma buffer;
+ * we just need to get the header part from the mailboxq structure.
+ */
+ pmb = (uint8_t *)&pmboxq->u.mb;
+ pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+ memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+
+ job = dd_data->context_un.mbox.set_job;
+ if (job) {
+ size = job->reply_payload.payload_len;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pmb_buf, size);
+ /* result for successful */
+ job->reply->result = 0;
+ job->dd_data = NULL;
+ /* need to hold the lock until we set job->dd_data to NULL
+ * to hold off the timeout handler in the midlayer from taking
+ * any action.
+ */
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2937 SLI_CONFIG ext-buffer maibox command "
+ "(x%x/x%x) complete bsg job done, bsize:%d\n",
+ phba->mbox_ext_buf_ctx.nembType,
+ phba->mbox_ext_buf_ctx.mboxType, size);
+ } else
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+job_done_out:
+ if (!job)
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2938 SLI_CONFIG ext-buffer maibox "
+ "command (x%x/x%x) failure, rc:x%x\n",
+ phba->mbox_ext_buf_ctx.nembType,
+ phba->mbox_ext_buf_ctx.mboxType, rc);
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
+ kfree(dd_data);
+
+ return job;
+}
+
+/**
+ * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is the completion handler for mailbox read commands with multiple
+ * external buffers.
+ **/
+static void
+lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct fc_bsg_job *job;
+
+ /* handle the BSG job with mailbox command */
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+ pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2939 SLI_CONFIG ext-buffer rd maibox command "
+ "complete, ctxState:x%x, mbxStatus:x%x\n",
+ phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+ job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+ if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ /* free base driver mailbox structure memory */
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ /* complete the bsg job if we have it */
+ if (job)
+ job->job_done(job);
+
+ return;
+}
+
+/**
+ * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is the completion handler for mailbox write commands with multiple
+ * external buffers.
+ **/
+static void
+lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct fc_bsg_job *job;
+
+ /* handle the BSG job with the mailbox command */
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+ pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2940 SLI_CONFIG ext-buffer wr maibox command "
+ "complete, ctxState:x%x, mbxStatus:x%x\n",
+ phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+ job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+ /* free all memory, including dma buffers */
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ /* complete the bsg job if we have it */
+ if (job)
+ job->job_done(job);
+
+ return;
+}
+
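
Review note: lpfc_bsg_sli_cfg_dma_desc_setup() below lands without a kernel-doc header. A suggested one, describing only what the code visibly does, would be:

/**
 * lpfc_bsg_sli_cfg_dma_desc_setup - set up one sli_config external buffer descriptor
 * @phba: Pointer to HBA context object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type.
 * @index: Index of the external buffer descriptor to fill in.
 * @mbx_dmabuf: Pointer to the dma buffer holding the SLI_CONFIG mailbox command.
 * @ext_dmabuf: Pointer to the dma buffer backing the external buffer at @index.
 *
 * This routine writes the physical address of the given buffer into the MSE
 * or HBD descriptor of a non-embedded SLI_CONFIG mailbox command; for index 0
 * the area following the mailbox header in @mbx_dmabuf is used.
 **/
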
+static void
+lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
+ uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
+ struct lpfc_dmabuf *ext_dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ if (index == 0) {
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi =
+ putPaddrHigh(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo =
+ putPaddrLow(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2943 SLI_CONFIG(mse)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].buf_len,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo);
+ } else {
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi =
+ putPaddrHigh(ext_dmabuf->phys);
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo =
+ putPaddrLow(ext_dmabuf->phys);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2944 SLI_CONFIG(mse)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].buf_len,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo);
+ }
+ } else {
+ if (index == 0) {
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi =
+ putPaddrHigh(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo =
+ putPaddrLow(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3007 SLI_CONFIG(hbd)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.
+ sli_config_emb1_subsys.hbd[index]),
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo);
+
+ } else {
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi =
+ putPaddrHigh(ext_dmabuf->phys);
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo =
+ putPaddrLow(ext_dmabuf->phys);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3008 SLI_CONFIG(hbd)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.
+ sli_config_emb1_subsys.hbd[index]),
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo);
+ }
+ }
+ return;
+}
+
+/**
+ * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs the SLI_CONFIG (0x9B) read mailbox command operation
+ * with non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ enum nemb_type nemb_tp,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct dfc_mbox_req *mbox_req;
+ struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
+ uint32_t ext_buf_cnt, ext_buf_index;
+ struct lpfc_dmabuf *ext_dmabuf = NULL;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ uint8_t *pmbx;
+ int rc, i;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2945 Handled SLI_CONFIG(mse) rd, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_MSE);
+ rc = -ERANGE;
+ goto job_error;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2941 Handled SLI_CONFIG(mse) rd, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ } else {
+ /* sanity check on interface type for support */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ /* nemb_tp == nemb_hbd */
+ ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2946 Handled SLI_CONFIG(hbd) rd, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_HBD);
+ rc = -ERANGE;
+ goto job_error;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2942 Handled SLI_CONFIG(hbd) rd, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ }
+
+ /* reject a non-embedded mailbox command with no external buffer */
+ if (ext_buf_cnt == 0) {
+ rc = -EPERM;
+ goto job_error;
+ } else if (ext_buf_cnt > 1) {
+ /* additional external read buffers */
+ for (i = 1; i < ext_buf_cnt; i++) {
+ ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
+ if (!ext_dmabuf) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ list_add_tail(&ext_dmabuf->list,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ }
+ }
+
+ /* bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+ /* for the first external buffer */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+ /* for the rest of external buffer descriptors if any */
+ if (ext_buf_cnt > 1) {
+ ext_buf_index = 1;
+ list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
+ ext_buf_index, dmabuf,
+ curr_dmabuf);
+ ext_buf_index++;
+ }
+ }
+
+ /* construct base driver mbox command */
+ pmb = &pmboxq->u.mb;
+ pmbx = (uint8_t *)dmabuf->virt;
+ memcpy(pmb, pmbx, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* multi-buffer handling context */
+ phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+ phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
+ phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+ phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+ phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+ phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+ /* callback for multi-buffer read mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
+ dd_data->context_un.mbox.set_job = job;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2947 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return 1;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2948 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+
+job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ lpfc_bsg_dma_page_list_free(phba,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ kfree(dd_data);
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+ return rc;
+}
+
+/**
+ * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs the SLI_CONFIG (0x9B) write mailbox command operation
+ * with non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ enum nemb_type nemb_tp,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct dfc_mbox_req *mbox_req;
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ uint32_t ext_buf_cnt;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ uint8_t *mbx;
+ int rc = 0, i;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2953 Handled SLI_CONFIG(mse) wr, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_MSE);
+ return -ERANGE;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2949 Handled SLI_CONFIG(mse) wr, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ } else {
+ /* sanity check on interface type for support */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -ENODEV;
+ /* nemb_tp == nemb_hbd */
+ ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2954 Handled SLI_CONFIG(hbd) wr, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_HBD);
+ return -ERANGE;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2950 Handled SLI_CONFIG(hbd) wr, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ }
+
+ if (ext_buf_cnt == 0)
+ return -EPERM;
+
+ /* for the first external buffer */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+ /* log the buffer lengths of the additional external buffers */
+ for (i = 1; i < ext_buf_cnt; i++) {
+ if (nemb_tp == nemb_mse)
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
+ i, sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[i].buf_len);
+ else
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
+ i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[i]));
+ }
+
+ /* multi-buffer handling context */
+ phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+ phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
+ phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+ phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+ phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+ phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+ if (ext_buf_cnt == 1) {
+ /* bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pmb = &pmboxq->u.mb;
+ mbx = (uint8_t *)dmabuf->virt;
+ memcpy(pmb, mbx, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* callback for multi-buffer write mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
+ dd_data->context_un.mbox.set_job = job;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2955 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return 1;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2956 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+ }
+
+job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ kfree(dd_data);
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles SLI_CONFIG (0x9B) mailbox commands with non-embedded
+ * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
+ * with embedded subsystem 0x1 and opcodes with external HBDs.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ uint32_t subsys;
+ uint32_t opcode;
+ int rc = SLI_CONFIG_NOT_HANDLED;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+ subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys);
+ opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys);
+ if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
+ switch (opcode) {
+ case FCOE_OPCODE_READ_FCF:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2957 Handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+ nemb_mse, dmabuf);
+ break;
+ case FCOE_OPCODE_ADD_FCF:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2958 Handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+ nemb_mse, dmabuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2959 Not handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ break;
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2977 Handled SLI_CONFIG "
+ "subsys:x%d, opcode:x%x\n",
+ subsys, opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ }
+ } else {
+ subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys);
+ opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys);
+ if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+ switch (opcode) {
+ case COMN_OPCODE_READ_OBJECT:
+ case COMN_OPCODE_READ_OBJECT_LIST:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2960 Handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+ nemb_hbd, dmabuf);
+ break;
+ case COMN_OPCODE_WRITE_OBJECT:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2961 Handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+ nemb_hbd, dmabuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2962 Not handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ break;
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2978 Handled SLI_CONFIG "
+ "subsys:x%d, opcode:x%x\n",
+ subsys, opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ }
+ }
+ return rc;
+}
+
+/**
+ * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine requests to abort a pass-through mailbox command with
+ * multiple external buffers due to an error condition.
+ **/
+static void
+lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
+{
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
+ else
+ lpfc_bsg_mbox_ext_session_reset(phba);
+ return;
+}
+
+/**
+ * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ *
+ * This routine copies the next mailbox read external buffer back to
+ * user space through BSG.
+ **/
+static int
+lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct lpfc_dmabuf *dmabuf;
+ uint8_t *pbuf;
+ uint32_t size;
+ uint32_t index;
+
+ index = phba->mbox_ext_buf_ctx.seqNum;
+ phba->mbox_ext_buf_ctx.seqNum++;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+ phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+ if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+ size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2963 SLI_CONFIG (mse) ext-buffer rd get "
+ "buffer[%d], size:%d\n", index, size);
+ } else {
+ size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2964 SLI_CONFIG (hbd) ext-buffer rd get "
+ "buffer[%d], size:%d\n", index, size);
+ }
+ if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
+ return -EPIPE;
+ dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
+ struct lpfc_dmabuf, list);
+ list_del_init(&dmabuf->list);
+ pbuf = (uint8_t *)dmabuf->virt;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pbuf, size);
+
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+
+ if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
+ "command session done\n");
+ lpfc_bsg_mbox_ext_session_reset(phba);
+ }
+
+ job->reply->result = 0;
+ job->job_done(job);
+
+ return SLI_CONFIG_HANDLED;
+}
+
+/**
+ * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine sets up the next mailbox write external buffer obtained
+ * from user space through BSG.
+ **/
+static int
+lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ enum nemb_type nemb_tp;
+ uint8_t *pbuf;
+ uint32_t size;
+ uint32_t index;
+ int rc;
+
+ index = phba->mbox_ext_buf_ctx.seqNum;
+ phba->mbox_ext_buf_ctx.seqNum++;
+ nemb_tp = phba->mbox_ext_buf_ctx.nembType;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+ phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ pbuf = (uint8_t *)dmabuf->virt;
+ size = job->request_payload.payload_len;
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ pbuf, size);
+
+ if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2966 SLI_CONFIG (mse) ext-buffer wr set "
+ "buffer[%d], size:%d\n",
+ phba->mbox_ext_buf_ctx.seqNum, size);
+
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2967 SLI_CONFIG (hbd) ext-buffer wr set "
+ "buffer[%d], size:%d\n",
+ phba->mbox_ext_buf_ctx.seqNum, size);
+
+ }
+
+ /* set up external buffer descriptor and add to external buffer list */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
+ phba->mbox_ext_buf_ctx.mbx_dmabuf,
+ dmabuf);
+ list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2968 SLI_CONFIG ext-buffer wr all %d "
+ "ebuffers received\n",
+ phba->mbox_ext_buf_ctx.numBuf);
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+ pmb = &pmboxq->u.mb;
+ memcpy(pmb, pbuf, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* callback for multi-buffer write mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
+ dd_data->context_un.mbox.set_job = job;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2969 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return 1;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2970 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+ goto job_error;
+ }
+
+ /* wait for additional external buffers */
+ job->reply->result = 0;
+ job->job_done(job);
+ return SLI_CONFIG_HANDLED;
+
+job_error:
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ kfree(dd_data);
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
+ * command with multiple non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ int rc;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2971 SLI_CONFIG buffer (type:x%x)\n",
+ phba->mbox_ext_buf_ctx.mboxType);
+
+ if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
+ if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2972 SLI_CONFIG rd buffer state "
+ "mismatch:x%x\n",
+ phba->mbox_ext_buf_ctx.state);
+ lpfc_bsg_mbox_ext_abort(phba);
+ return -EPIPE;
+ }
+ rc = lpfc_bsg_read_ebuf_get(phba, job);
+ if (rc == SLI_CONFIG_HANDLED)
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
+ if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2973 SLI_CONFIG wr buffer state "
+ "mismatch:x%x\n",
+ phba->mbox_ext_buf_ctx.state);
+ lpfc_bsg_mbox_ext_abort(phba);
+ return -EPIPE;
+ }
+ rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
+ }
+ return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
+ * (0x9B) mailbox commands and external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct dfc_mbox_req *mbox_req;
+ int rc;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* mbox command with/without single external buffer */
+ if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
+ return SLI_CONFIG_NOT_HANDLED;
+
+ /* mbox command and first external buffer */
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
+ if (mbox_req->extSeqNum == 1) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2974 SLI_CONFIG mailbox: tag:%d, "
+ "seq:%d\n", mbox_req->extMboxTag,
+ mbox_req->extSeqNum);
+ rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
+ return rc;
+ } else
+ goto sli_cfg_ext_error;
+ }
+
+ /*
+ * handle additional external buffers
+ */
+
+ /* check broken pipe conditions */
+ if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
+ goto sli_cfg_ext_error;
+ if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
+ goto sli_cfg_ext_error;
+ if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
+ goto sli_cfg_ext_error;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2975 SLI_CONFIG mailbox external buffer: "
+ "extSta:x%x, tag:%d, seq:%d\n",
+ phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
+ mbox_req->extSeqNum);
+ rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
+ return rc;
+
+sli_cfg_ext_error:
+ /* all other cases, broken pipe */
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2976 SLI_CONFIG mailbox broken pipe: "
+ "ctxSta:x%x, ctxNumBuf:%d "
+ "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
+ phba->mbox_ext_buf_ctx.state,
+ phba->mbox_ext_buf_ctx.numBuf,
+ phba->mbox_ext_buf_ctx.mbxTag,
+ phba->mbox_ext_buf_ctx.seqNum,
+ mbox_req->extMboxTag, mbox_req->extSeqNum);
+
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ return -EPIPE;
+}
+
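
To summarize the multi-buffer pass-through protocol implemented above: the first SLI_CONFIG buffer (extSeqNum == 1) opens a session, each following buffer must carry the same tag and the next sequence number, and anything else is treated as a broken pipe and resets the session. A standalone sketch of that bookkeeping (names and values are illustrative, modeled on the checks in lpfc_bsg_handle_sli_cfg_ext() and the mbox_ext_buf_ctx fields) is:

#include <stdint.h>
#include <stdio.h>

/* Illustrative session bookkeeping, modeled on mbox_ext_buf_ctx. */
struct ext_buf_session {
	uint32_t tag;		/* tag of the open session         */
	uint32_t num_buf;	/* total external buffers expected */
	uint32_t seq;		/* last sequence number accepted   */
};

/* Accept the next buffer only if it carries the session tag, stays within
 * num_buf and arrives as seq + 1; otherwise report a broken pipe. */
static int session_accept(struct ext_buf_session *s, uint32_t tag, uint32_t seq)
{
	if (tag != s->tag || seq > s->num_buf || seq != s->seq + 1)
		return -1;
	s->seq = seq;
	return 0;
}

int main(void)
{
	struct ext_buf_session s = { .tag = 7, .num_buf = 3, .seq = 1 };

	printf("seq 2: %s\n", session_accept(&s, 7, 2) ? "reject" : "accept");
	printf("seq 4: %s\n", session_accept(&s, 7, 4) ? "reject" : "accept");
	return 0;
}
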
+/**
* lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
* @phba: Pointer to HBA context object.
* @mb: Pointer to a mailbox object.
@@ -2638,22 +4118,21 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
/* a 4k buffer to hold the mb and extended data from/to the bsg */
- MAILBOX_t *mb = NULL;
+ uint8_t *pmbx = NULL;
struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
- uint32_t size;
- struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
- struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
- struct ulp_bde64 *rxbpl = NULL;
- struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ struct lpfc_dmabuf *dmabuf = NULL;
+ struct dfc_mbox_req *mbox_req;
struct READ_EVENT_LOG_VAR *rdEventLog;
uint32_t transmit_length, receive_length, mode;
+ struct lpfc_mbx_sli4_config *sli4_config;
struct lpfc_mbx_nembed_cmd *nembed_sge;
struct mbox_header *header;
struct ulp_bde64 *bde;
uint8_t *ext = NULL;
int rc = 0;
uint8_t *from;
+ uint32_t size;
+
/* in case no data is transferred */
job->reply->reply_payload_rcv_len = 0;
@@ -2665,6 +4144,18 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
goto job_done;
}
+ /*
+ * Don't allow mailbox commands to be sent when blocked or when in
+ * the middle of discovery
+ */
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+ rc = -EAGAIN;
+ goto job_done;
+ }
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
/* check if requested extended data lengths are valid */
if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
(mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
@@ -2672,6 +4163,32 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
goto job_done;
}
+ dmabuf = lpfc_bsg_dma_page_alloc(phba);
+ if (!dmabuf || !dmabuf->virt) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ /* Get the mailbox command or external buffer from BSG */
+ pmbx = (uint8_t *)dmabuf->virt;
+ size = job->request_payload.payload_len;
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, pmbx, size);
+
+ /* Handle possible SLI_CONFIG with non-embedded payloads */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
+ if (rc == SLI_CONFIG_HANDLED)
+ goto job_cont;
+ if (rc)
+ goto job_done;
+ /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
+ }
+
+ rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
+ if (rc != 0)
+ goto job_done; /* must be negative */
+
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
@@ -2681,12 +4198,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
goto job_done;
}
- mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
- if (!mb) {
- rc = -ENOMEM;
- goto job_done;
- }
-
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
rc = -ENOMEM;
@@ -2694,17 +4205,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
- size = job->request_payload.payload_len;
- sg_copy_to_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt,
- mb, size);
-
- rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
- if (rc != 0)
- goto job_done; /* must be negative */
-
pmb = &pmboxq->u.mb;
- memcpy(pmb, mb, sizeof(*pmb));
+ memcpy(pmb, pmbx, sizeof(*pmb));
pmb->mbxOwner = OWN_HOST;
pmboxq->vport = vport;
@@ -2721,30 +4223,13 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
"0x%x while in stopped state.\n",
pmb->mbxCommand);
- /* Don't allow mailbox commands to be sent when blocked
- * or when in the middle of discovery
- */
- if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
- rc = -EAGAIN;
- goto job_done;
- }
-
/* extended mailbox commands will need an extended buffer */
if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
- ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
- if (!ext) {
- rc = -ENOMEM;
- goto job_done;
- }
-
/* any data for the device? */
if (mbox_req->inExtWLen) {
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)ext, from,
- mbox_req->inExtWLen * sizeof(uint32_t));
+ from = pmbx;
+ ext = from + sizeof(MAILBOX_t);
}
-
pmboxq->context2 = ext;
pmboxq->in_ext_byte_len =
mbox_req->inExtWLen * sizeof(uint32_t);
@@ -2768,46 +4253,17 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
rc = -ERANGE;
goto job_done;
}
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
- putPaddrHigh(dmp->dma.phys);
+ putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
- putPaddrLow(dmp->dma.phys);
+ putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
- putPaddrHigh(dmp->dma.phys +
- pmb->un.varBIUdiag.un.s2.
- xmit_bde64.tus.f.bdeSize);
+ putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
+ + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
- putPaddrLow(dmp->dma.phys +
- pmb->un.varBIUdiag.un.s2.
- xmit_bde64.tus.f.bdeSize);
-
- /* copy the transmit data found in the mailbox extension area */
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
+ putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
+ + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
rdEventLog = &pmb->un.varRdEventLog;
receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
@@ -2823,33 +4279,10 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* mode zero uses a bde like biu diags command */
if (mode == 0) {
-
- /* rebuild the command for sli4 using our own buffers
- * like we do for biu diags
- */
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- if (rxbpl) {
- INIT_LIST_HEAD(&rxbmp->list);
- dmp = diag_cmd_data_alloc(phba, rxbpl,
- receive_length, 0);
- }
-
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
- pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
- pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
+ pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
}
} else if (phba->sli_rev == LPFC_SLI_REV4) {
if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
@@ -2860,36 +4293,14 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* receive length cannot be greater than mailbox
* extension size
*/
- if ((receive_length == 0) ||
- (receive_length > MAILBOX_EXT_SIZE)) {
+ if (receive_length == 0) {
rc = -ERANGE;
goto job_done;
}
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
- 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
- pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
- pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
+ pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
pmb->un.varUpdateCfg.co) {
bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
@@ -2899,102 +4310,53 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
rc = -ERANGE;
goto job_done;
}
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl,
- bde->tus.f.bdeSize, 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
- bde->addrHigh = putPaddrHigh(dmp->dma.phys);
- bde->addrLow = putPaddrLow(dmp->dma.phys);
-
- /* copy the transmit data found in the mailbox
- * extension area
- */
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)dmp->dma.virt, from,
- bde->tus.f.bdeSize);
+ bde->addrHigh = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ bde->addrLow = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
- /* rebuild the command for sli4 using our own buffers
- * like we do for biu diags
- */
- header = (struct mbox_header *)&pmb->un.varWords[0];
- nembed_sge = (struct lpfc_mbx_nembed_cmd *)
- &pmb->un.varWords[0];
- receive_length = nembed_sge->sge[0].length;
-
- /* receive length cannot be greater than mailbox
- * extension size
- */
- if ((receive_length == 0) ||
- (receive_length > MAILBOX_EXT_SIZE)) {
- rc = -ERANGE;
- goto job_done;
- }
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
+ /* Handling non-embedded SLI_CONFIG mailbox command */
+ sli4_config = &pmboxq->u.mqe.un.sli4_config;
+ if (!bf_get(lpfc_mbox_hdr_emb,
+ &sli4_config->header.cfg_mhdr)) {
+ /* rebuild the command for sli4 using our
+ * own buffers like we do for biu diags
+ */
+ header = (struct mbox_header *)
+ &pmb->un.varWords[0];
+ nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+ &pmb->un.varWords[0];
+ receive_length = nembed_sge->sge[0].length;
+
+ /* receive length cannot be greater than
+ * mailbox extension size
+ */
+ if ((receive_length == 0) ||
+ (receive_length > MAILBOX_EXT_SIZE)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
- 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
+ nembed_sge->sge[0].pa_hi =
+ putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ nembed_sge->sge[0].pa_lo =
+ putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
}
-
- INIT_LIST_HEAD(&dmp->dma.list);
- nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys);
- nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys);
- /* copy the transmit data found in the mailbox
- * extension area
- */
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)dmp->dma.virt, from,
- header->cfg_mhdr.payload_length);
}
}
- dd_data->context_un.mbox.rxbmp = rxbmp;
- dd_data->context_un.mbox.dmp = dmp;
+ dd_data->context_un.mbox.dmabuffers = dmabuf;
/* setup wake call as IOCB callback */
- pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
/* setup context field to pass wait_queue pointer to wake function */
pmboxq->context1 = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->context_un.mbox.pmboxq = pmboxq;
- dd_data->context_un.mbox.mb = mb;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
dd_data->context_un.mbox.set_job = job;
dd_data->context_un.mbox.ext = ext;
dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
@@ -3011,11 +4373,11 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
/* job finished, copy the data */
- memcpy(mb, pmb, sizeof(*pmb));
+ memcpy(pmbx, pmb, sizeof(*pmb));
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt,
- mb, size);
+ job->reply_payload.sg_cnt,
+ pmbx, size);
/* not waiting mbox already done */
rc = 0;
goto job_done;
@@ -3027,22 +4389,12 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
job_done:
/* common exit for error or job completed inline */
- kfree(mb);
if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
- kfree(ext);
- if (dmp) {
- dma_free_coherent(&phba->pcidev->dev,
- dmp->size, dmp->dma.virt,
- dmp->dma.phys);
- kfree(dmp);
- }
- if (rxbmp) {
- lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
- kfree(rxbmp);
- }
+ lpfc_bsg_dma_page_free(phba, dmabuf);
kfree(dd_data);
+job_cont:
return rc;
}
@@ -3055,37 +4407,28 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ struct dfc_mbox_req *mbox_req;
int rc = 0;
- /* in case no data is transferred */
+ /* mix-and-match backward compatibility */
job->reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2737 Received MBOX_REQ request below "
- "minimum size\n");
- rc = -EINVAL;
- goto job_error;
- }
-
- if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
- rc = -EINVAL;
- goto job_error;
- }
-
- if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
- rc = -EINVAL;
- goto job_error;
- }
-
- if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
- rc = -EAGAIN;
- goto job_error;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2737 Mix-and-match backward compability "
+ "between MBOX_REQ old size:%d and "
+ "new request size:%d\n",
+ (int)(job->request_len -
+ sizeof(struct fc_bsg_request)),
+ (int)sizeof(struct dfc_mbox_req));
+ mbox_req = (struct dfc_mbox_req *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ mbox_req->extMboxTag = 0;
+ mbox_req->extSeqNum = 0;
}
rc = lpfc_bsg_issue_mbox(phba, job, vport);
-job_error:
if (rc == 0) {
/* job done */
job->reply->result = 0;
@@ -3416,10 +4759,16 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
rc = lpfc_bsg_send_mgmt_rsp(job);
break;
case LPFC_BSG_VENDOR_DIAG_MODE:
- rc = lpfc_bsg_diag_mode(job);
+ rc = lpfc_bsg_diag_loopback_mode(job);
+ break;
+ case LPFC_BSG_VENDOR_DIAG_MODE_END:
+ rc = lpfc_sli4_bsg_diag_mode_end(job);
+ break;
+ case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
+ rc = lpfc_bsg_diag_loopback_run(job);
break;
- case LPFC_BSG_VENDOR_DIAG_TEST:
- rc = lpfc_bsg_diag_test(job);
+ case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
+ rc = lpfc_sli4_bsg_link_diag_test(job);
break;
case LPFC_BSG_VENDOR_GET_MGMT_REV:
rc = lpfc_bsg_get_dfc_rev(job);
@@ -3538,6 +4887,8 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
/* the mbox completion handler can now be run */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->job_done(job);
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
break;
case TYPE_MENLO:
menlo = &dd_data->context_un.menlo;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index b542aca6f5ae..c8c2b47ea886 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -24,15 +24,17 @@
* These are the vendor unique structures passed in using the bsg
* FC_BSG_HST_VENDOR message code type.
*/
-#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
-#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
-#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
-#define LPFC_BSG_VENDOR_DIAG_MODE 4
-#define LPFC_BSG_VENDOR_DIAG_TEST 5
-#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
-#define LPFC_BSG_VENDOR_MBOX 7
-#define LPFC_BSG_VENDOR_MENLO_CMD 8
-#define LPFC_BSG_VENDOR_MENLO_DATA 9
+#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
+#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
+#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
+#define LPFC_BSG_VENDOR_DIAG_MODE 4
+#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5
+#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
+#define LPFC_BSG_VENDOR_MBOX 7
+#define LPFC_BSG_VENDOR_MENLO_CMD 8
+#define LPFC_BSG_VENDOR_MENLO_DATA 9
+#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
+#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
struct set_ct_event {
uint32_t command;
@@ -67,10 +69,25 @@ struct diag_mode_set {
uint32_t timeout;
};
+struct sli4_link_diag {
+ uint32_t command;
+ uint32_t timeout;
+ uint32_t test_id;
+ uint32_t loops;
+ uint32_t test_version;
+ uint32_t error_action;
+};
+
struct diag_mode_test {
uint32_t command;
};
+struct diag_status {
+ uint32_t mbox_status;
+ uint32_t shdr_status;
+ uint32_t shdr_add_status;
+};
+
#define LPFC_WWNN_TYPE 0
#define LPFC_WWPN_TYPE 1
@@ -92,11 +109,15 @@ struct get_mgmt_rev_reply {
};
#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
+
+/* BSG mailbox request header */
struct dfc_mbox_req {
uint32_t command;
uint32_t mbOffset;
uint32_t inExtWLen;
uint32_t outExtWLen;
+ uint32_t extMboxTag;
+ uint32_t extSeqNum;
};
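
A minimal sketch of how the two new tagging fields might be filled by a management application, inferred from the driver-side checks in lpfc_bsg_handle_sli_cfg_ext(): every buffer of a non-embedded multi-buffer request carries the same non-zero extMboxTag, and extSeqNum runs from 1 (the mailbox itself) to N (the last external buffer). The helper name and the assumption that the BSG vendor opcode lands in the command field are illustrative only.

/* Illustration only; not part of the patch. */
static inline void example_tag_mbox_req(struct dfc_mbox_req *req,
					uint32_t tag, uint32_t seq)
{
	req->command    = LPFC_BSG_VENDOR_MBOX;	/* assumed vendor opcode */
	req->mbOffset   = 0;
	req->inExtWLen  = 0;
	req->outExtWLen = 0;
	req->extMboxTag = tag;	/* same non-zero tag for the whole set      */
	req->extSeqNum  = seq;	/* 1 for the mailbox, 2..N per external buf */
}
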
/* Used for menlo command or menlo data. The xri is only used for menlo data */
@@ -171,7 +192,7 @@ struct lpfc_sli_config_mse {
#define lpfc_mbox_sli_config_mse_len_WORD buf_len
};
-struct lpfc_sli_config_subcmd_hbd {
+struct lpfc_sli_config_hbd {
uint32_t buf_len;
#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT 0
#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff
@@ -194,21 +215,39 @@ struct lpfc_sli_config_hdr {
uint32_t reserved5;
};
-struct lpfc_sli_config_generic {
+struct lpfc_sli_config_emb0_subsys {
struct lpfc_sli_config_hdr sli_config_hdr;
#define LPFC_MBX_SLI_CONFIG_MAX_MSE 19
struct lpfc_sli_config_mse mse[LPFC_MBX_SLI_CONFIG_MAX_MSE];
+ uint32_t padding;
+ uint32_t word64;
+#define lpfc_emb0_subcmnd_opcode_SHIFT 0
+#define lpfc_emb0_subcmnd_opcode_MASK 0xff
+#define lpfc_emb0_subcmnd_opcode_WORD word64
+#define lpfc_emb0_subcmnd_subsys_SHIFT 8
+#define lpfc_emb0_subcmnd_subsys_MASK 0xff
+#define lpfc_emb0_subcmnd_subsys_WORD word64
+/* Subsystem FCOE (0x0C) OpCodes */
+#define SLI_CONFIG_SUBSYS_FCOE 0x0C
+#define FCOE_OPCODE_READ_FCF 0x08
+#define FCOE_OPCODE_ADD_FCF 0x09
};
-struct lpfc_sli_config_subcmnd {
+struct lpfc_sli_config_emb1_subsys {
struct lpfc_sli_config_hdr sli_config_hdr;
uint32_t word6;
-#define lpfc_subcmnd_opcode_SHIFT 0
-#define lpfc_subcmnd_opcode_MASK 0xff
-#define lpfc_subcmnd_opcode_WORD word6
-#define lpfc_subcmnd_subsys_SHIFT 8
-#define lpfc_subcmnd_subsys_MASK 0xff
-#define lpfc_subcmnd_subsys_WORD word6
+#define lpfc_emb1_subcmnd_opcode_SHIFT 0
+#define lpfc_emb1_subcmnd_opcode_MASK 0xff
+#define lpfc_emb1_subcmnd_opcode_WORD word6
+#define lpfc_emb1_subcmnd_subsys_SHIFT 8
+#define lpfc_emb1_subcmnd_subsys_MASK 0xff
+#define lpfc_emb1_subcmnd_subsys_WORD word6
+/* Subsystem COMN (0x01) OpCodes */
+#define SLI_CONFIG_SUBSYS_COMN 0x01
+#define COMN_OPCODE_READ_OBJECT 0xAB
+#define COMN_OPCODE_WRITE_OBJECT 0xAC
+#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
+#define COMN_OPCODE_DELETE_OBJECT 0xAE
uint32_t timeout;
uint32_t request_length;
uint32_t word9;
@@ -222,8 +261,8 @@ struct lpfc_sli_config_subcmnd {
uint32_t rd_offset;
uint32_t obj_name[26];
uint32_t hbd_count;
-#define LPFC_MBX_SLI_CONFIG_MAX_HBD 10
- struct lpfc_sli_config_subcmd_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
+#define LPFC_MBX_SLI_CONFIG_MAX_HBD 8
+ struct lpfc_sli_config_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
};
struct lpfc_sli_config_mbox {
@@ -235,7 +274,11 @@ struct lpfc_sli_config_mbox {
#define lpfc_mqe_command_MASK 0x000000FF
#define lpfc_mqe_command_WORD word0
union {
- struct lpfc_sli_config_generic sli_config_generic;
- struct lpfc_sli_config_subcmnd sli_config_subcmnd;
+ struct lpfc_sli_config_emb0_subsys sli_config_emb0_subsys;
+ struct lpfc_sli_config_emb1_subsys sli_config_emb1_subsys;
} un;
};
+
+/* driver only */
+#define SLI_CONFIG_NOT_HANDLED 0
+#define SLI_CONFIG_HANDLED 1
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f0b332f4eedb..fc20c247f36b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -55,6 +55,8 @@ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_supported_pages(struct lpfcMboxq *);
void lpfc_pc_sli4_params(struct lpfcMboxq *);
int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
+ uint16_t, uint16_t, bool);
int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
@@ -171,6 +173,7 @@ void lpfc_delayed_disc_tmo(unsigned long);
void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
int lpfc_config_port_prep(struct lpfc_hba *);
+void lpfc_update_vport_wwn(struct lpfc_vport *vport);
int lpfc_config_port_post(struct lpfc_hba *);
int lpfc_hba_down_prep(struct lpfc_hba *);
int lpfc_hba_down_post(struct lpfc_hba *);
@@ -365,6 +368,10 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
uint32_t, uint32_t);
extern struct lpfc_hbq_init *lpfc_hbq_defs[];
+/* SLI4 if_type 2 externs. */
+int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
+int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
+
/* externs BlockGuard */
extern char *_dump_buf_data;
extern unsigned long _dump_buf_data_order;
@@ -429,3 +436,6 @@ void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
uint32_t);
+int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
+/* functions to support SR-IOV */
+int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index d9edfd90d7ff..779b88e1469d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -352,6 +352,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
icmd->ulpContext = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
/* For GEN_REQUEST64_CR, use the RPI */
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index c93fca058603..ffe82d169b40 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1665,7 +1665,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
/* Get fast-path complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path FCP CQ information:\n");
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
+ fcp_qidx = 0;
+ do {
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
@@ -1678,7 +1679,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
- }
+ } while (++fcp_qidx < phba->cfg_fcp_eq_count);
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
/* Get mailbox queue information */
@@ -2012,7 +2013,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* FCP complete queue */
- for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
+ qidx = 0;
+ do {
if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
@@ -2024,7 +2026,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
phba->sli4_hba.fcp_cq[qidx];
goto pass_check;
}
- }
+ } while (++qidx < phba->cfg_fcp_eq_count);
goto error_out;
break;
case LPFC_IDIAG_MQ:
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e2c452467c8b..32a084534f3e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -250,7 +250,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
icmd->un.elsreq64.myID = vport->fc_myDID;
/* For ELS_REQUEST64_CR, use the VPI by default */
- icmd->ulpContext = vport->vpi + phba->vpi_base;
+ icmd->ulpContext = phba->vpi_ids[vport->vpi];
icmd->ulpCt_h = 0;
/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
if (elscmd == ELS_CMD_ECHO)
@@ -454,6 +454,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
rc = -ENOMEM;
goto fail_free_dmabuf;
}
+
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
rc = -ENOMEM;
@@ -6585,6 +6586,26 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
struct lpfc_vport *vport;
unsigned long flags;
+ int i;
+
+ /* The physical ports are always vpi 0 - translation is unnecessary. */
+ if (vpi > 0) {
+ /*
+ * Translate the physical vpi to the logical vpi. The
+ * vport stores the logical vpi.
+ */
+ for (i = 0; i < phba->max_vpi; i++) {
+ if (vpi == phba->vpi_ids[i])
+ break;
+ }
+
+ if (i >= phba->max_vpi) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "2936 Could not find Vport mapped "
+ "to vpi %d\n", vpi);
+ return NULL;
+ }
+ }
spin_lock_irqsave(&phba->hbalock, flags);
list_for_each_entry(vport, &phba->port_list, listentry) {
@@ -6641,8 +6662,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
vport = phba->pport;
else
vport = lpfc_find_vport_by_vpid(phba,
- icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
+ icmd->unsli3.rcvsli3.vpi);
}
+
/* If there are no BDEs associated
* with this IOCB, there is nothing to do.
*/
@@ -7222,7 +7244,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
/* Set the ulpContext to the vpi */
- elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
+ elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi];
} else {
/* For FDISC, Let FDISC rsp set the NPortID for this VPI */
icmd->ulpCt_h = 1;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7a35df5e2038..18d0dbfda2bc 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -881,7 +881,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
/* Clean up any firmware default rpi's */
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
- lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
+ lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
mb->vport = vport;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
@@ -2690,16 +2690,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
sizeof (struct serv_parm));
- if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
- memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
- sizeof(vport->fc_nodename));
- memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof(vport->fc_portname));
+ lpfc_update_vport_wwn(vport);
if (vport->port_type == LPFC_PHYSICAL_PORT) {
memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
@@ -3430,7 +3421,8 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
- ndlp->nlp_rpi = mb->un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3504,7 +3496,8 @@ out:
return;
}
- ndlp->nlp_rpi = mb->un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3591,7 +3584,6 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_type & NLP_FCP_INITIATOR)
rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
-
if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
fc_remote_port_rolechg(rport, rport_ids.roles);
@@ -4106,11 +4098,16 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
int rc;
+ uint16_t rpi;
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
+ /* SLI4 ports require the physical rpi value. */
+ rpi = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4179,7 +4176,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
+ lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
+ mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
@@ -4203,7 +4201,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
+ lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
+ mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
@@ -4653,10 +4652,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
if (num_sent)
return;
- /*
- * For SLI3, cmpl_reg_vpi will set port_state to READY, and
- * continue discovery.
- */
+ /* Register the VPI for SLI3, NON-NPIV only. */
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
!(vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_RSCN_MODE) &&
@@ -4943,7 +4939,7 @@ restart_disc:
if (phba->sli_rev < LPFC_SLI_REV4) {
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
lpfc_issue_reg_vpi(phba, vport);
- else { /* NPIV Not enabled */
+ else {
lpfc_issue_clear_la(phba, vport);
vport->port_state = LPFC_VPORT_READY;
}
@@ -5069,7 +5065,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->context1 = NULL;
pmb->context2 = NULL;
- ndlp->nlp_rpi = mb->un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -5354,6 +5351,17 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
+ /*
+ * If the CVL_RCVD bit is not set then we have sent the FLOGI.
+ * If dev_loss fires while we are waiting we do not want to
+ * unreg the FCF.
+ */
+ if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
+ spin_unlock_irq(shost->host_lock);
+ ret = 1;
+ goto out;
+ }
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
(ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 86b6f7e6686a..9059524cf225 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -64,6 +64,8 @@
#define SLI3_IOCB_CMD_SIZE 128
#define SLI3_IOCB_RSP_SIZE 64
+#define LPFC_UNREG_ALL_RPIS_VPORT 0xffff
+#define LPFC_UNREG_ALL_DFLT_RPIS 0xffffffff
/* vendor ID used in SCSI netlink calls */
#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
@@ -903,6 +905,8 @@ struct RRQ { /* Structure is in Big Endian format */
#define rrq_rxid_WORD rrq_exchg
};
+#define LPFC_MAX_VFN_PER_PFN 255 /* Maximum VFs allowed per ARI */
+#define LPFC_DEF_VFN_PER_PFN 0 /* Default VFs due to platform limitation*/
struct RTV_RSP { /* Structure is in Big Endian format */
uint32_t ratov;
@@ -1199,7 +1203,9 @@ typedef struct {
#define PCI_DEVICE_ID_BALIUS 0xe131
#define PCI_DEVICE_ID_PROTEUS_PF 0xe180
#define PCI_DEVICE_ID_LANCER_FC 0xe200
+#define PCI_DEVICE_ID_LANCER_FC_VF 0xe208
#define PCI_DEVICE_ID_LANCER_FCOE 0xe260
+#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
#define PCI_DEVICE_ID_SAT_SMB 0xf011
#define PCI_DEVICE_ID_SAT_MID 0xf015
#define PCI_DEVICE_ID_RFLY 0xf095
@@ -3021,7 +3027,7 @@ typedef struct {
#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t))
#define MAILBOX_HBA_EXT_OFFSET 0x100
/* max mbox xmit size is a page size for sysfs IO operations */
-#define MAILBOX_MAX_XMIT_SIZE PAGE_SIZE
+#define MAILBOX_SYSFS_MAX 4096
typedef union {
uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 4dff668ebdad..11e26a26b5d1 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -170,6 +170,25 @@ struct lpfc_sli_intf {
#define LPFC_PCI_FUNC3 3
#define LPFC_PCI_FUNC4 4
+/* SLI4 interface type-2 control register offsets */
+#define LPFC_CTL_PORT_SEM_OFFSET 0x400
+#define LPFC_CTL_PORT_STA_OFFSET 0x404
+#define LPFC_CTL_PORT_CTL_OFFSET 0x408
+#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
+#define LPFC_CTL_PORT_ER2_OFFSET 0x410
+#define LPFC_CTL_PDEV_CTL_OFFSET 0x414
+
+/* Some SLI4 interface type-2 PDEV_CTL register bits */
+#define LPFC_CTL_PDEV_CTL_DRST 0x00000001
+#define LPFC_CTL_PDEV_CTL_FRST 0x00000002
+#define LPFC_CTL_PDEV_CTL_DD 0x00000004
+#define LPFC_CTL_PDEV_CTL_LC 0x00000008
+#define LPFC_CTL_PDEV_CTL_FRL_ALL 0x00
+#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE 0x10
+#define LPFC_CTL_PDEV_CTL_FRL_NIC 0x20
+
+#define LPFC_FW_DUMP_REQUEST (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
+
/* Active interrupt test count */
#define LPFC_ACT_INTR_CNT 4
@@ -210,9 +229,26 @@ struct ulp_bde64 {
struct lpfc_sli4_flags {
uint32_t word0;
-#define lpfc_fip_flag_SHIFT 0
-#define lpfc_fip_flag_MASK 0x00000001
-#define lpfc_fip_flag_WORD word0
+#define lpfc_idx_rsrc_rdy_SHIFT 0
+#define lpfc_idx_rsrc_rdy_MASK 0x00000001
+#define lpfc_idx_rsrc_rdy_WORD word0
+#define LPFC_IDX_RSRC_RDY 1
+#define lpfc_xri_rsrc_rdy_SHIFT 1
+#define lpfc_xri_rsrc_rdy_MASK 0x00000001
+#define lpfc_xri_rsrc_rdy_WORD word0
+#define LPFC_XRI_RSRC_RDY 1
+#define lpfc_rpi_rsrc_rdy_SHIFT 2
+#define lpfc_rpi_rsrc_rdy_MASK 0x00000001
+#define lpfc_rpi_rsrc_rdy_WORD word0
+#define LPFC_RPI_RSRC_RDY 1
+#define lpfc_vpi_rsrc_rdy_SHIFT 3
+#define lpfc_vpi_rsrc_rdy_MASK 0x00000001
+#define lpfc_vpi_rsrc_rdy_WORD word0
+#define LPFC_VPI_RSRC_RDY 1
+#define lpfc_vfi_rsrc_rdy_SHIFT 4
+#define lpfc_vfi_rsrc_rdy_MASK 0x00000001
+#define lpfc_vfi_rsrc_rdy_WORD word0
+#define LPFC_VFI_RSRC_RDY 1
};
struct sli4_bls_rsp {
@@ -739,6 +775,12 @@ union lpfc_sli4_cfg_shdr {
#define lpfc_mbox_hdr_version_SHIFT 0
#define lpfc_mbox_hdr_version_MASK 0x000000FF
#define lpfc_mbox_hdr_version_WORD word9
+#define lpfc_mbox_hdr_pf_num_SHIFT 16
+#define lpfc_mbox_hdr_pf_num_MASK 0x000000FF
+#define lpfc_mbox_hdr_pf_num_WORD word9
+#define lpfc_mbox_hdr_vh_num_SHIFT 24
+#define lpfc_mbox_hdr_vh_num_MASK 0x000000FF
+#define lpfc_mbox_hdr_vh_num_WORD word9
#define LPFC_Q_CREATE_VERSION_2 2
#define LPFC_Q_CREATE_VERSION_1 1
#define LPFC_Q_CREATE_VERSION_0 0
@@ -766,12 +808,22 @@ union lpfc_sli4_cfg_shdr {
} response;
};
-/* Mailbox structures */
+/* Mailbox Header structures.
+ * struct mbox_header is defined for first generation SLI4_CFG mailbox
+ * calls deployed for BE-based ports.
+ *
+ * struct sli4_mbox_header is defined for second generation SLI4
+ * ports that don't deploy the SLI4_CFG mechanism.
+ */
struct mbox_header {
struct lpfc_sli4_cfg_mhdr cfg_mhdr;
union lpfc_sli4_cfg_shdr cfg_shdr;
};
+#define LPFC_EXTENT_LOCAL 0
+#define LPFC_TIMEOUT_DEFAULT 0
+#define LPFC_EXTENT_VERSION_DEFAULT 0
+
/* Subsystem Definitions */
#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
@@ -794,6 +846,13 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
+#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
+#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
+#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
+#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D
+#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0
+#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4
+#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC
#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
/* FCoE Opcodes */
@@ -808,6 +867,8 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
/* Mailbox command structures */
struct eq_context {
@@ -1210,6 +1271,187 @@ struct lpfc_mbx_mq_destroy {
} u;
};
+/* Start Gen 2 SLI4 Mailbox definitions: */
+
+/* Define allocate-ready Gen 2 SLI4 FCoE Resource Extent Types. */
+#define LPFC_RSC_TYPE_FCOE_VFI 0x20
+#define LPFC_RSC_TYPE_FCOE_VPI 0x21
+#define LPFC_RSC_TYPE_FCOE_RPI 0x22
+#define LPFC_RSC_TYPE_FCOE_XRI 0x23
+
+struct lpfc_mbx_get_rsrc_extent_info {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_type_SHIFT 0
+#define lpfc_mbx_get_rsrc_extent_info_type_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_type_WORD word4
+ } req;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_cnt_SHIFT 0
+#define lpfc_mbx_get_rsrc_extent_info_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_cnt_WORD word4
+#define lpfc_mbx_get_rsrc_extent_info_size_SHIFT 16
+#define lpfc_mbx_get_rsrc_extent_info_size_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_size_WORD word4
+ } rsp;
+ } u;
+};
+
+struct lpfc_id_range {
+ uint32_t word5;
+#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
+#define lpfc_mbx_rsrc_id_word4_0_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_0_WORD word5
+#define lpfc_mbx_rsrc_id_word4_1_SHIFT 16
+#define lpfc_mbx_rsrc_id_word4_1_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_1_WORD word5
+};
+
+struct lpfc_mbx_set_link_diag_state {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_set_diag_state_diag_SHIFT 0
+#define lpfc_mbx_set_diag_state_diag_MASK 0x00000001
+#define lpfc_mbx_set_diag_state_diag_WORD word0
+#define lpfc_mbx_set_diag_state_link_num_SHIFT 16
+#define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F
+#define lpfc_mbx_set_diag_state_link_num_WORD word0
+#define lpfc_mbx_set_diag_state_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_state_link_type_MASK 0x00000003
+#define lpfc_mbx_set_diag_state_link_type_WORD word0
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+struct lpfc_mbx_set_link_diag_loopback {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_set_diag_lpbk_type_SHIFT 0
+#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000001
+#define lpfc_mbx_set_diag_lpbk_type_WORD word0
+#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE 0x0
+#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL 0x1
+#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL 0x2
+#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT 16
+#define lpfc_mbx_set_diag_lpbk_link_num_MASK 0x0000003F
+#define lpfc_mbx_set_diag_lpbk_link_num_WORD word0
+#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_lpbk_link_type_MASK 0x00000003
+#define lpfc_mbx_set_diag_lpbk_link_type_WORD word0
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+struct lpfc_mbx_run_link_diag_test {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_run_diag_test_link_num_SHIFT 16
+#define lpfc_mbx_run_diag_test_link_num_MASK 0x0000003F
+#define lpfc_mbx_run_diag_test_link_num_WORD word0
+#define lpfc_mbx_run_diag_test_link_type_SHIFT 22
+#define lpfc_mbx_run_diag_test_link_type_MASK 0x00000003
+#define lpfc_mbx_run_diag_test_link_type_WORD word0
+ uint32_t word1;
+#define lpfc_mbx_run_diag_test_test_id_SHIFT 0
+#define lpfc_mbx_run_diag_test_test_id_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_id_WORD word1
+#define lpfc_mbx_run_diag_test_loops_SHIFT 16
+#define lpfc_mbx_run_diag_test_loops_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_loops_WORD word1
+ uint32_t word2;
+#define lpfc_mbx_run_diag_test_test_ver_SHIFT 0
+#define lpfc_mbx_run_diag_test_test_ver_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_ver_WORD word2
+#define lpfc_mbx_run_diag_test_err_act_SHIFT 16
+#define lpfc_mbx_run_diag_test_err_act_MASK 0x000000FF
+#define lpfc_mbx_run_diag_test_err_act_WORD word2
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+/*
+ * struct lpfc_mbx_alloc_rsrc_extents:
+ * A mbox is generically 256 bytes long. An SLI4_CONFIG mailbox requires
+ * 6 words of header + 4 words of shared subcommand header +
+ * 1 word of Extent-Opcode-specific header = 11 words or 44 bytes total.
+ *
+ * An embedded version of SLI4_CONFIG therefore has 256 - 44 = 212 bytes
+ * for extents payload.
+ *
+ * 212/2 (bytes per extent) = 106 extents.
+ * 106/2 (extents per word) = 53 words.
+ * The lpfc_id_range id array is statically sized to 53 words.
+ *
+ * This mailbox definition is used for ALLOC or GET_ALLOCATED
+ * extent ranges. For ALLOC, the type and cnt are required.
+ * For GET_ALLOCATED, only the type is required.
+ */
+struct lpfc_mbx_alloc_rsrc_extents {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_alloc_rsrc_extents_type_SHIFT 0
+#define lpfc_mbx_alloc_rsrc_extents_type_MASK 0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_type_WORD word4
+#define lpfc_mbx_alloc_rsrc_extents_cnt_SHIFT 16
+#define lpfc_mbx_alloc_rsrc_extents_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_cnt_WORD word4
+ } req;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_rsrc_cnt_SHIFT 0
+#define lpfc_mbx_rsrc_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_cnt_WORD word4
+ struct lpfc_id_range id[53];
+ } rsp;
+ } u;
+};
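
The sizing arithmetic from the comment above, restated as a small stand-alone sketch; the EX_* macro names are local to this illustration and do not exist in the driver.

#define EX_MBOX_BYTES		256			/* one SLI4 mailbox           */
#define EX_HDR_WORDS		(6 + 4 + 1)		/* cfg + shared + opcode hdrs */
#define EX_PAYLOAD_BYTES	(EX_MBOX_BYTES - EX_HDR_WORDS * 4)	/* 212      */
#define EX_MAX_EXTENTS		(EX_PAYLOAD_BYTES / 2)	/* 2 bytes per extent -> 106  */
#define EX_ID_RANGE_WORDS	(EX_MAX_EXTENTS / 2)	/* 2 extents per word -> 53   */
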
+
+/*
+ * This is the non-embedded version of ALLOC or GET RSRC_EXTENTS. Word4 in this
+ * structure shares the same SHIFT/MASK/WORD defines provided in the
+ * mbx_alloc_rsrc_extents and mbx_get_alloc_rsrc_extents, word4, provided in
+ * the structures defined above. This non-embedded structure provides for the
+ * maximum number of extents supported by the port.
+ */
+struct lpfc_mbx_nembed_rsrc_extent {
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+ uint32_t word4;
+ struct lpfc_id_range id;
+};
+
+struct lpfc_mbx_dealloc_rsrc_extents {
+ struct mbox_header header;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_dealloc_rsrc_extents_type_SHIFT 0
+#define lpfc_mbx_dealloc_rsrc_extents_type_MASK 0x0000FFFF
+#define lpfc_mbx_dealloc_rsrc_extents_type_WORD word4
+ } req;
+
+};
+
+/* Start SLI4 FCoE specific mbox structures. */
+
struct lpfc_mbx_post_hdr_tmpl {
struct mbox_header header;
uint32_t word10;
@@ -1229,7 +1471,7 @@ struct sli4_sge { /* SLI-4 */
uint32_t word2;
#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/
-#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF
+#define lpfc_sli4_sge_offset_MASK 0x1FFFFFFF
#define lpfc_sli4_sge_offset_WORD word2
#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets
this flag !! */
@@ -1773,61 +2015,31 @@ struct lpfc_mbx_read_rev {
struct lpfc_mbx_read_config {
uint32_t word1;
-#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0
-#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_max_bbc_WORD word1
-#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8
-#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_init_bbc_WORD word1
+#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31
+#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
+#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
uint32_t word2;
-#define lpfc_mbx_rd_conf_nport_did_SHIFT 0
-#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF
-#define lpfc_mbx_rd_conf_nport_did_WORD word2
#define lpfc_mbx_rd_conf_topology_SHIFT 24
#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
#define lpfc_mbx_rd_conf_topology_WORD word2
- uint32_t word3;
-#define lpfc_mbx_rd_conf_ao_SHIFT 0
-#define lpfc_mbx_rd_conf_ao_MASK 0x00000001
-#define lpfc_mbx_rd_conf_ao_WORD word3
-#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8
-#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F
-#define lpfc_mbx_rd_conf_bb_scn_WORD word3
-#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12
-#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F
-#define lpfc_mbx_rd_conf_cbb_scn_WORD word3
-#define lpfc_mbx_rd_conf_mc_SHIFT 29
-#define lpfc_mbx_rd_conf_mc_MASK 0x00000001
-#define lpfc_mbx_rd_conf_mc_WORD word3
+ uint32_t rsvd_3;
uint32_t word4;
#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
- uint32_t word5;
-#define lpfc_mbx_rd_conf_lp_tov_SHIFT 0
-#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF
-#define lpfc_mbx_rd_conf_lp_tov_WORD word5
+ uint32_t rsvd_5;
uint32_t word6;
#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
- uint32_t word7;
-#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0
-#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_r_t_tov_WORD word7
- uint32_t word8;
-#define lpfc_mbx_rd_conf_al_tov_SHIFT 0
-#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F
-#define lpfc_mbx_rd_conf_al_tov_WORD word8
+ uint32_t rsvd_7;
+ uint32_t rsvd_8;
uint32_t word9;
#define lpfc_mbx_rd_conf_lmt_SHIFT 0
#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_lmt_WORD word9
- uint32_t word10;
-#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0
-#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_max_alpa_WORD word10
- uint32_t word11_rsvd;
+ uint32_t rsvd_10;
+ uint32_t rsvd_11;
uint32_t word12;
#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
@@ -1857,9 +2069,6 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_vfi_count_WORD word15
uint32_t word16;
-#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0
-#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF
-#define lpfc_mbx_rd_conf_fcfi_base_WORD word16
#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
@@ -2169,6 +2378,12 @@ struct lpfc_sli4_parameters {
#define cfg_fcoe_SHIFT 0
#define cfg_fcoe_MASK 0x00000001
#define cfg_fcoe_WORD word12
+#define cfg_ext_SHIFT 1
+#define cfg_ext_MASK 0x00000001
+#define cfg_ext_WORD word12
+#define cfg_hdrr_SHIFT 2
+#define cfg_hdrr_MASK 0x00000001
+#define cfg_hdrr_WORD word12
#define cfg_phwq_SHIFT 15
#define cfg_phwq_MASK 0x00000001
#define cfg_phwq_WORD word12
@@ -2198,6 +2413,145 @@ struct lpfc_mbx_get_sli4_parameters {
struct lpfc_sli4_parameters sli4_parameters;
};
+struct lpfc_rscr_desc_generic {
+#define LPFC_RSRC_DESC_WSIZE 18
+ uint32_t desc[LPFC_RSRC_DESC_WSIZE];
+};
+
+struct lpfc_rsrc_desc_pcie {
+ uint32_t word0;
+#define lpfc_rsrc_desc_pcie_type_SHIFT 0
+#define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_type_WORD word0
+#define LPFC_RSRC_DESC_TYPE_PCIE 0x40
+ uint32_t word1;
+#define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0
+#define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pfnum_WORD word1
+ uint32_t reserved;
+ uint32_t word3;
+#define lpfc_rsrc_desc_pcie_sriov_sta_SHIFT 0
+#define lpfc_rsrc_desc_pcie_sriov_sta_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_sriov_sta_WORD word3
+#define lpfc_rsrc_desc_pcie_pf_sta_SHIFT 8
+#define lpfc_rsrc_desc_pcie_pf_sta_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_sta_WORD word3
+#define lpfc_rsrc_desc_pcie_pf_type_SHIFT 16
+#define lpfc_rsrc_desc_pcie_pf_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_type_WORD word3
+ uint32_t word4;
+#define lpfc_rsrc_desc_pcie_nr_virtfn_SHIFT 0
+#define lpfc_rsrc_desc_pcie_nr_virtfn_MASK 0x0000ffff
+#define lpfc_rsrc_desc_pcie_nr_virtfn_WORD word4
+};
+
+struct lpfc_rsrc_desc_fcfcoe {
+ uint32_t word0;
+#define lpfc_rsrc_desc_fcfcoe_type_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_type_WORD word0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43
+ uint32_t word1;
+#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_vfnum_WORD word1
+#define lpfc_rsrc_desc_fcfcoe_pfnum_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_pfnum_MASK 0x000007ff
+#define lpfc_rsrc_desc_fcfcoe_pfnum_WORD word1
+ uint32_t word2;
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_WORD word2
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_WORD word2
+ uint32_t word3;
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_WORD word3
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_WORD word3
+ uint32_t word4;
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_WORD word4
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_WORD word4
+ uint32_t word5;
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_WORD word5
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_WORD word5
+ uint32_t word6;
+ uint32_t word7;
+ uint32_t word8;
+ uint32_t word9;
+ uint32_t word10;
+ uint32_t word11;
+ uint32_t word12;
+ uint32_t word13;
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_MASK 0x0000003f
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_SHIFT 6
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_MASK 0x00000003
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lmc_SHIFT 8
+#define lpfc_rsrc_desc_fcfcoe_lmc_MASK 0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lmc_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lld_SHIFT 9
+#define lpfc_rsrc_desc_fcfcoe_lld_MASK 0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lld_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13
+};
+
+struct lpfc_func_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM 2
+ uint32_t rsrc_desc_count;
+ struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_func_cfg {
+ struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
+ struct lpfc_func_cfg func_cfg;
+};
+
+struct lpfc_prof_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM 2
+ uint32_t rsrc_desc_count;
+ struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_prof_cfg {
+ struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
+ union {
+ struct {
+ uint32_t word10;
+#define lpfc_mbx_get_prof_cfg_prof_id_SHIFT 0
+#define lpfc_mbx_get_prof_cfg_prof_id_MASK 0x000000ff
+#define lpfc_mbx_get_prof_cfg_prof_id_WORD word10
+#define lpfc_mbx_get_prof_cfg_prof_tp_SHIFT 8
+#define lpfc_mbx_get_prof_cfg_prof_tp_MASK 0x00000003
+#define lpfc_mbx_get_prof_cfg_prof_tp_WORD word10
+ } request;
+ struct {
+ struct lpfc_prof_cfg prof_cfg;
+ } response;
+ } u;
+};
+
/* Mailbox Completion Queue Error Messages */
#define MB_CQE_STATUS_SUCCESS 0x0
#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
@@ -2206,6 +2560,29 @@ struct lpfc_mbx_get_sli4_parameters {
#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
#define MB_CQE_STATUS_DMA_FAILED 0x5
+#define LPFC_MBX_WR_CONFIG_MAX_BDE 8
+struct lpfc_mbx_wr_object {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_wr_object_eof_SHIFT 31
+#define lpfc_wr_object_eof_MASK 0x00000001
+#define lpfc_wr_object_eof_WORD word4
+#define lpfc_wr_object_write_length_SHIFT 0
+#define lpfc_wr_object_write_length_MASK 0x00FFFFFF
+#define lpfc_wr_object_write_length_WORD word4
+ uint32_t write_offset;
+ uint32_t object_name[26];
+ uint32_t bde_count;
+ struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
+ } request;
+ struct {
+ uint32_t actual_write_length;
+ } response;
+ } u;
+};
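
A hedged sketch of how the request fields above might be set for the final chunk of an object write; the mbox pointer (an LPFC_MBOXQ_t), the running offset, and the 4096-byte chunk size are assumptions for illustration, and the driver's bf_set() accessor is assumed available.

/* Sketch only: mark the last 4 KB chunk of a write-object request. */
struct lpfc_mbx_wr_object *wr_object = &mbox->u.mqe.un.wr_object;

bf_set(lpfc_wr_object_write_length, &wr_object->u.request, 4096);
bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
wr_object->u.request.write_offset = written;	/* hypothetical running offset */
wr_object->u.request.bde_count = 1;
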
+
/* mailbox queue entry structure */
struct lpfc_mqe {
uint32_t word0;
@@ -2241,6 +2618,9 @@ struct lpfc_mqe {
struct lpfc_mbx_cq_destroy cq_destroy;
struct lpfc_mbx_wq_destroy wq_destroy;
struct lpfc_mbx_rq_destroy rq_destroy;
+ struct lpfc_mbx_get_rsrc_extent_info rsrc_extent_info;
+ struct lpfc_mbx_alloc_rsrc_extents alloc_rsrc_extents;
+ struct lpfc_mbx_dealloc_rsrc_extents dealloc_rsrc_extents;
struct lpfc_mbx_post_sgl_pages post_sgl_pages;
struct lpfc_mbx_nembed_cmd nembed_cmd;
struct lpfc_mbx_read_rev read_rev;
@@ -2252,7 +2632,13 @@ struct lpfc_mqe {
struct lpfc_mbx_supp_pages supp_pages;
struct lpfc_mbx_pc_sli4_params sli4_params;
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
+ struct lpfc_mbx_set_link_diag_state link_diag_state;
+ struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
+ struct lpfc_mbx_run_link_diag_test link_diag_test;
+ struct lpfc_mbx_get_func_cfg get_func_cfg;
+ struct lpfc_mbx_get_prof_cfg get_prof_cfg;
struct lpfc_mbx_nop nop;
+ struct lpfc_mbx_wr_object wr_object;
} un;
};
@@ -2458,7 +2844,7 @@ struct lpfc_bmbx_create {
#define SGL_ALIGN_SZ 64
#define SGL_PAGE_SIZE 4096
/* align SGL addr on a size boundary - adjust address up */
-#define NO_XRI ((uint16_t)-1)
+#define NO_XRI 0xffff
struct wqe_common {
uint32_t word6;
@@ -2798,9 +3184,28 @@ union lpfc_wqe {
struct gen_req64_wqe gen_req;
};
+#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001
+#define LPFC_FILE_TYPE_GROUP 0xf7
+#define LPFC_FILE_ID_GROUP 0xa2
+struct lpfc_grp_hdr {
+ uint32_t size;
+ uint32_t magic_number;
+ uint32_t word2;
+#define lpfc_grp_hdr_file_type_SHIFT 24
+#define lpfc_grp_hdr_file_type_MASK 0x000000FF
+#define lpfc_grp_hdr_file_type_WORD word2
+#define lpfc_grp_hdr_id_SHIFT 16
+#define lpfc_grp_hdr_id_MASK 0x000000FF
+#define lpfc_grp_hdr_id_WORD word2
+ uint8_t rev_name[128];
+};
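
A minimal sketch, assuming the image header fields are stored big-endian and that the image arrives through request_firmware(), of how the magic number above could be used to reject a file that is not a group object; the helper name is illustrative only.

/* Illustration only: reject a firmware image that is not a group object. */
static int example_check_grp_hdr(const struct firmware *fw)
{
	const struct lpfc_grp_hdr *hdr = (const struct lpfc_grp_hdr *)fw->data;

	if (fw->size < sizeof(*hdr) ||
	    be32_to_cpu(hdr->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM)
		return -EINVAL;	/* not a group object image */
	return 0;
}
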
+
#define FCP_COMMAND 0x0
#define FCP_COMMAND_DATA_OUT 0x1
#define ELS_COMMAND_NON_FIP 0xC
#define ELS_COMMAND_FIP 0xD
#define OTHER_COMMAND 0x8
+#define LPFC_FW_DUMP 1
+#define LPFC_FW_RESET 2
+#define LPFC_DV_RESET 3
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7dda036a1af3..148b98ddbb1d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -30,6 +30,7 @@
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
+#include <linux/firmware.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -211,7 +212,6 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
if (!lpfc_vpd_data)
goto out_free_mbox;
-
do {
lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -309,6 +309,45 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
}
/**
+ * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
+ * cfg_soft_wwnn, cfg_soft_wwpn
+ * @vport: pointer to lpfc vport data structure.
+ *
+ *
+ * None.
+ **/
+void
+lpfc_update_vport_wwn(struct lpfc_vport *vport)
+{
+ /* If the soft name exists then update it using the service params */
+ if (vport->phba->cfg_soft_wwnn)
+ u64_to_wwn(vport->phba->cfg_soft_wwnn,
+ vport->fc_sparam.nodeName.u.wwn);
+ if (vport->phba->cfg_soft_wwpn)
+ u64_to_wwn(vport->phba->cfg_soft_wwpn,
+ vport->fc_sparam.portName.u.wwn);
+
+ /*
+ * If the name is empty or there exists a soft name
+ * then copy the service params name, otherwise use the fc name
+ */
+ if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
+ memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+ sizeof(struct lpfc_name));
+ else
+ memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
+ sizeof(struct lpfc_name));
+
+ if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ else
+ memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
+ sizeof(struct lpfc_name));
+}
+
+/**
* lpfc_config_port_post - Perform lpfc initialization after config port
* @phba: pointer to lpfc hba data structure.
*
@@ -377,17 +416,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
pmb->context1 = NULL;
-
- if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
- memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
- sizeof (struct lpfc_name));
- memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof (struct lpfc_name));
+ lpfc_update_vport_wwn(vport);
/* Update the fc_host data structures with new wwn. */
fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
@@ -573,7 +602,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Clear all pending interrupts */
writel(0xffffffff, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
-
phba->link_state = LPFC_HBA_ERROR;
if (rc != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
@@ -1755,7 +1783,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
&& descp && descp[0] != '\0')
return;
- if (phba->lmt & LMT_10Gb)
+ if (phba->lmt & LMT_16Gb)
+ max_speed = 16;
+ else if (phba->lmt & LMT_10Gb)
max_speed = 10;
else if (phba->lmt & LMT_8Gb)
max_speed = 8;
@@ -1922,12 +1952,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
"Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_LANCER_FC:
- oneConnect = 1;
- m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"};
+ case PCI_DEVICE_ID_LANCER_FC_VF:
+ m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_LANCER_FCOE:
+ case PCI_DEVICE_ID_LANCER_FCOE_VF:
oneConnect = 1;
- m = (typeof(m)){"Undefined", "PCIe", "FCoE"};
+ m = (typeof(m)){"OCe50100", "PCIe", "FCoE"};
break;
default:
m = (typeof(m)){"Unknown", "", ""};
@@ -1936,7 +1967,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
if (mdp && mdp[0] == '\0')
snprintf(mdp, 79,"%s", m.name);
- /* oneConnect hba requires special processing, they are all initiators
+ /*
+ * oneConnect hba requires special processing, they are all initiators
* and we put the port number on the end
*/
if (descp && descp[0] == '\0') {
@@ -2656,6 +2688,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
kfree(io);
phba->total_iocbq_bufs--;
}
+
spin_unlock_irq(&phba->hbalock);
return 0;
}
@@ -3612,6 +3645,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
"2718 Clear Virtual Link Received for VPI 0x%x"
" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
+
vport = lpfc_find_vport_by_vpid(phba,
acqe_fip->index - phba->vpi_base);
ndlp = lpfc_sli4_perform_vport_cvl(vport);
@@ -3935,6 +3969,10 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
pci_try_set_mwi(pdev);
pci_save_state(pdev);
+ /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+ if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
+ pdev->needs_freset = 1;
+
return 0;
out_disable_device:
@@ -3997,6 +4035,36 @@ lpfc_reset_hba(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ * @nr_vfn: number of virtual functions to be enabled.
+ *
+ * This function enables PCI SR-IOV virtual functions on a physical
+ * function. It invokes the PCI SR-IOV API with @nr_vfn to enable that
+ * many virtual functions on the physical function. Because not all
+ * devices support SR-IOV, a failure return from the pci_enable_sriov()
+ * API call is not treated as an error condition for most devices.
+ **/
+int
+lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
+{
+ struct pci_dev *pdev = phba->pcidev;
+ int rc;
+
+ rc = pci_enable_sriov(pdev, nr_vfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2806 Failed to enable sriov on this device "
+ "with vfn number nr_vf:%d, rc:%d\n",
+ nr_vfn, rc);
+ } else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2807 Successful enable sriov on this device "
+ "with vfn number nr_vf:%d\n", nr_vfn);
+ return rc;
+}
+
+/**
* lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
* @phba: pointer to lpfc hba data structure.
*
@@ -4011,6 +4079,7 @@ static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
+ int rc;
/*
* Initialize timers used by driver
@@ -4085,6 +4154,23 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
return -ENOMEM;
+ /*
+ * Enable sr-iov virtual functions if supported and configured
+ * through the module parameter.
+ */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+ phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2808 Requested number of SR-IOV "
+ "virtual functions (%d) is not "
+ "supported\n",
+ phba->cfg_sriov_nr_virtfn);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ }
+
return 0;
}
@@ -4161,6 +4247,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->fcf.redisc_wait.data = (unsigned long)phba;
/*
+ * Control structure for handling external multi-buffer mailbox
+ * command pass-through.
+ */
+ memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
+ sizeof(struct lpfc_mbox_ext_buf_ctx));
+ INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ /*
* We need to do a READ_CONFIG mailbox command here before
* calling lpfc_get_cfgparam. For VFs this will report the
* MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
@@ -4233,7 +4327,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
/*
- * Initialize dirver internal slow-path work queues
+ * Initialize driver internal slow-path work queues
*/
/* Driver internel slow-path CQ Event pool */
@@ -4249,6 +4343,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Receive queue CQ Event work queue list */
INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
+ /* Initialize extent block lists. */
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
+ INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
+
/* Initialize the driver internal SLI layer lists. */
lpfc_sli_setup(phba);
lpfc_sli_queue_setup(phba);
@@ -4323,9 +4423,19 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
/*
* Get sli4 parameters that override parameters from Port capabilities.
- * If this call fails it is not a critical error so continue loading.
+ * If this call fails, it isn't critical unless the SLI4 parameters come
+ * back in conflict.
*/
- lpfc_get_sli4_parameters(phba, mboxq);
+ rc = lpfc_get_sli4_parameters(phba, mboxq);
+ if (rc) {
+ if (phba->sli4_hba.extents_in_use &&
+ phba->sli4_hba.rpi_hdrs_in_use) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2999 Unsupported SLI4 Parameters "
+ "Extents and RPI headers enabled.\n");
+ goto out_free_bsmbx;
+ }
+ }
mempool_free(mboxq, phba->mbox_mem_pool);
/* Create all the SLI4 queues */
rc = lpfc_sli4_queue_create(phba);
@@ -4350,7 +4460,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
"1430 Failed to initialize sgl list.\n");
goto out_free_sgl_list;
}
-
rc = lpfc_sli4_init_rpi_hdrs(phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -4366,6 +4475,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2759 Failed allocate memory for FCF round "
"robin failover bmask\n");
+ rc = -ENOMEM;
goto out_remove_rpi_hdrs;
}
@@ -4375,6 +4485,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2572 Failed allocate memory for fast-path "
"per-EQ handle array\n");
+ rc = -ENOMEM;
goto out_free_fcf_rr_bmask;
}
@@ -4384,9 +4495,27 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2573 Failed allocate memory for msi-x "
"interrupt vector entries\n");
+ rc = -ENOMEM;
goto out_free_fcp_eq_hdl;
}
+ /*
+ * Enable sr-iov virtual functions if supported and configured
+ * through the module parameter.
+ */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+ phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3020 Requested number of SR-IOV "
+ "virtual functions (%d) is not "
+ "supported\n",
+ phba->cfg_sriov_nr_virtfn);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ }
+
return rc;
out_free_fcp_eq_hdl:
@@ -4449,6 +4578,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
lpfc_sli4_cq_event_release_all(phba);
lpfc_sli4_cq_event_pool_destroy(phba);
+ /* Release resource identifiers. */
+ lpfc_sli4_dealloc_resource_identifiers(phba);
+
/* Free the bsmbx region. */
lpfc_destroy_bootstrap_mbox(phba);
@@ -4649,6 +4781,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
"Unloading driver.\n", __func__);
goto out_free_iocbq;
}
+ iocbq_entry->sli4_lxritag = NO_XRI;
iocbq_entry->sli4_xritag = NO_XRI;
spin_lock_irq(&phba->hbalock);
@@ -4746,7 +4879,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2400 lpfc_init_sgl_list els %d.\n",
+ "2400 ELS XRI count %d.\n",
els_xri_cnt);
/* Initialize and populate the sglq list per host/VF. */
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
@@ -4779,7 +4912,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
phba->sli4_hba.scsi_xri_max =
phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
phba->sli4_hba.scsi_xri_cnt = 0;
-
phba->sli4_hba.lpfc_scsi_psb_array =
kzalloc((sizeof(struct lpfc_scsi_buf *) *
phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
@@ -4802,13 +4934,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
goto out_free_mem;
}
- sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
- if (sglq_entry->sli4_xritag == NO_XRI) {
- kfree(sglq_entry);
- printk(KERN_ERR "%s: failed to allocate XRI.\n"
- "Unloading driver.\n", __func__);
- goto out_free_mem;
- }
sglq_entry->buff_type = GEN_BUFF_TYPE;
sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
if (sglq_entry->virt == NULL) {
@@ -4857,24 +4982,20 @@ int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
int rc = 0;
- int longs;
- uint16_t rpi_count;
struct lpfc_rpi_hdr *rpi_hdr;
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
-
/*
- * Provision an rpi bitmask range for discovery. The total count
- * is the difference between max and base + 1.
+ * If the SLI4 port supports extents, posting the rpi header isn't
+ * required. Set the expected maximum count and let the actual value
+ * get set when extents are fully allocated.
*/
- rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
- phba->sli4_hba.max_cfg_param.max_rpi - 1;
-
- longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
- phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
- GFP_KERNEL);
- if (!phba->sli4_hba.rpi_bmask)
- return -ENOMEM;
+ if (!phba->sli4_hba.rpi_hdrs_in_use) {
+ phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+ return rc;
+ }
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
if (!rpi_hdr) {
@@ -4908,11 +5029,28 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
struct lpfc_rpi_hdr *rpi_hdr;
uint32_t rpi_count;
+ /*
+ * If the SLI4 port supports extents, posting the rpi header isn't
+ * required. Set the expected maximum count and let the actual value
+ * get set when extents are fully allocated.
+ */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ return NULL;
+ if (phba->sli4_hba.extents_in_use)
+ return NULL;
+
+ /* The limit on the logical index is just the max_rpi count. */
rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
- phba->sli4_hba.max_cfg_param.max_rpi - 1;
+ phba->sli4_hba.max_cfg_param.max_rpi - 1;
spin_lock_irq(&phba->hbalock);
- curr_rpi_range = phba->sli4_hba.next_rpi;
+ /*
+ * Establish the starting RPI in this header block. The starting
+ * rpi is normalized to a zero base because the physical rpi is
+ * port based.
+ */
+ curr_rpi_range = phba->sli4_hba.next_rpi -
+ phba->sli4_hba.max_cfg_param.rpi_base;
spin_unlock_irq(&phba->hbalock);
/*
@@ -4925,6 +5063,8 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
else
rpi_count = LPFC_RPI_HDR_COUNT;
+ if (!rpi_count)
+ return NULL;
/*
* First allocate the protocol header region for the port. The
* port expects a 4KB DMA-mapped memory region that is 4K aligned.
@@ -4957,12 +5097,14 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
rpi_hdr->page_count = 1;
spin_lock_irq(&phba->hbalock);
- rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
+
+ /* The rpi_hdr stores the logical index only. */
+ rpi_hdr->start_rpi = curr_rpi_range;
list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
/*
- * The next_rpi stores the next module-64 rpi value to post
- * in any subsequent rpi memory region postings.
+ * The next_rpi stores the next logical module-64 rpi value used
+ * to post physical rpis in subsequent rpi postings.
*/
phba->sli4_hba.next_rpi += rpi_count;
spin_unlock_irq(&phba->hbalock);
@@ -4981,15 +5123,18 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to remove all memory resources allocated
- * to support rpis. This routine presumes the caller has released all
- * rpis consumed by fabric or port logins and is prepared to have
- * the header pages removed.
+ * to support rpis for SLI4 ports not supporting extents. This routine
+ * presumes the caller has released all rpis consumed by fabric or port
+ * logins and is prepared to have the header pages removed.
**/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ goto exit;
+
list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
&phba->sli4_hba.lpfc_rpi_hdr_list, list) {
list_del(&rpi_hdr->list);
@@ -4998,9 +5143,9 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
kfree(rpi_hdr->dmabuf);
kfree(rpi_hdr);
}
-
- phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
- memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
+ exit:
+ /* There are no rpis available to the port now. */
+ phba->sli4_hba.next_rpi = 0;
}
/**
@@ -5487,7 +5632,8 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
/* Final checks. The port status should be clean. */
if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
&reg_data.word0) ||
- bf_get(lpfc_sliport_status_err, &reg_data)) {
+ (bf_get(lpfc_sliport_status_err, &reg_data) &&
+ !bf_get(lpfc_sliport_status_rn, &reg_data))) {
phba->work_status[0] =
readl(phba->sli4_hba.u.if_type2.
ERR1regaddr);
@@ -5741,7 +5887,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmb;
struct lpfc_mbx_read_config *rd_config;
- uint32_t rc = 0;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ struct lpfc_mbx_get_func_cfg *get_func_cfg;
+ struct lpfc_rsrc_desc_fcfcoe *desc;
+ uint32_t desc_count;
+ int length, i, rc = 0;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
@@ -5763,6 +5914,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
rc = -EIO;
} else {
rd_config = &pmb->u.mqe.un.rd_config;
+ phba->sli4_hba.extents_in_use =
+ bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
phba->sli4_hba.max_cfg_param.max_xri =
bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
phba->sli4_hba.max_cfg_param.xri_base =
@@ -5781,8 +5934,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
phba->sli4_hba.max_cfg_param.max_fcfi =
bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
- phba->sli4_hba.max_cfg_param.fcfi_base =
- bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
phba->sli4_hba.max_cfg_param.max_eq =
bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
phba->sli4_hba.max_cfg_param.max_rq =
@@ -5800,11 +5951,13 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
phba->max_vports = phba->max_vpi;
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2003 cfg params XRI(B:%d M:%d), "
+ "2003 cfg params Extents? %d "
+ "XRI(B:%d M:%d), "
"VPI(B:%d M:%d) "
"VFI(B:%d M:%d) "
"RPI(B:%d M:%d) "
- "FCFI(B:%d M:%d)\n",
+ "FCFI(Count:%d)\n",
+ phba->sli4_hba.extents_in_use,
phba->sli4_hba.max_cfg_param.xri_base,
phba->sli4_hba.max_cfg_param.max_xri,
phba->sli4_hba.max_cfg_param.vpi_base,
@@ -5813,10 +5966,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.max_vfi,
phba->sli4_hba.max_cfg_param.rpi_base,
phba->sli4_hba.max_cfg_param.max_rpi,
- phba->sli4_hba.max_cfg_param.fcfi_base,
phba->sli4_hba.max_cfg_param.max_fcfi);
}
- mempool_free(pmb, phba->mbox_mem_pool);
+
+ if (rc)
+ goto read_cfg_out;
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
if (phba->cfg_hba_queue_depth >
@@ -5825,6 +5979,65 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->cfg_hba_queue_depth =
phba->sli4_hba.max_cfg_param.max_xri -
lpfc_sli4_get_els_iocb_cnt(phba);
+
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ goto read_cfg_out;
+
+ /* get the pf# and vf# for SLI4 if_type 2 port */
+ length = (sizeof(struct lpfc_mbx_get_func_cfg) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc || shdr_status || shdr_add_status) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3026 Mailbox failed, mbxCmd x%x "
+ "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
+ bf_get(lpfc_mqe_command, &pmb->u.mqe),
+ bf_get(lpfc_mqe_status, &pmb->u.mqe));
+ rc = -EIO;
+ goto read_cfg_out;
+ }
+
+	/* search for fc_fcoe resource descriptor */
+ get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
+ desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
+
+ for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+ desc = (struct lpfc_rsrc_desc_fcfcoe *)
+ &get_func_cfg->func_cfg.desc[i];
+ if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
+ bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+ phba->sli4_hba.iov.pf_number =
+ bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
+ phba->sli4_hba.iov.vf_number =
+ bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
+ break;
+ }
+ }
+
+ if (i < LPFC_RSRC_DESC_MAX_NUM)
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
+ "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
+ phba->sli4_hba.iov.vf_number);
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3028 GET_FUNCTION_CONFIG: failed to find "
+				"Resource Descriptor:x%x\n",
+ LPFC_RSRC_DESC_TYPE_FCFCOE);
+ rc = -EIO;
+ }
+
+read_cfg_out:
+ mempool_free(pmb, phba->mbox_mem_pool);
return rc;
}
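The GET_FUNCTION_CONFIG handling above walks a fixed-size array of resource descriptors, stops at the first FC/FCoE entry, and treats walking off the end as an error. A small stand-alone sketch of that scan-with-sentinel pattern (the descriptor layout and type value here are placeholders, not the SLI4 wire format):

#include <stdio.h>

#define DESC_MAX_NUM	8
#define DESC_TYPE_FCOE	0x43	/* placeholder, not the real descriptor type */

struct rsrc_desc { int type; int pf_number; int vf_number; };

int main(void)
{
	struct rsrc_desc descs[DESC_MAX_NUM] = {
		{ .type = 0x01 },
		{ .type = DESC_TYPE_FCOE, .pf_number = 2, .vf_number = 0 },
	};
	int i;

	for (i = 0; i < DESC_MAX_NUM; i++)
		if (descs[i].type == DESC_TYPE_FCOE)
			break;

	if (i < DESC_MAX_NUM)
		printf("pf=%d vf=%d\n", descs[i].pf_number, descs[i].vf_number);
	else
		printf("descriptor type 0x%x not found\n", DESC_TYPE_FCOE);
	return 0;
}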
@@ -6229,8 +6442,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
phba->sli4_hba.mbx_cq = NULL;
/* Release FCP response complete queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+ fcp_qidx = 0;
+ do
lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+ while (++fcp_qidx < phba->cfg_fcp_eq_count);
kfree(phba->sli4_hba.fcp_cq);
phba->sli4_hba.fcp_cq = NULL;
@@ -6353,16 +6568,24 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.sp_eq->queue_id);
/* Set up fast-path FCP Response Complete Queue */
- for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
+ fcp_cqidx = 0;
+ do {
if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0526 Fast-path FCP CQ (%d) not "
"allocated\n", fcp_cqidx);
goto out_destroy_fcp_cq;
}
- rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
- phba->sli4_hba.fp_eq[fcp_cqidx],
- LPFC_WCQ, LPFC_FCP);
+ if (phba->cfg_fcp_eq_count)
+ rc = lpfc_cq_create(phba,
+ phba->sli4_hba.fcp_cq[fcp_cqidx],
+ phba->sli4_hba.fp_eq[fcp_cqidx],
+ LPFC_WCQ, LPFC_FCP);
+ else
+ rc = lpfc_cq_create(phba,
+ phba->sli4_hba.fcp_cq[fcp_cqidx],
+ phba->sli4_hba.sp_eq,
+ LPFC_WCQ, LPFC_FCP);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0527 Failed setup of fast-path FCP "
@@ -6371,12 +6594,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2588 FCP CQ setup: cq[%d]-id=%d, "
- "parent eq[%d]-id=%d\n",
+ "parent %seq[%d]-id=%d\n",
fcp_cqidx,
phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
+ (phba->cfg_fcp_eq_count) ? "" : "sp_",
fcp_cqidx,
- phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
- }
+ (phba->cfg_fcp_eq_count) ?
+ phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
+ phba->sli4_hba.sp_eq->queue_id);
+ } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
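Several loops in this patch change from for to do/while because cfg_fcp_eq_count can now legitimately be 0 (single-EQ, non-MSI-X operation) while one FCP CQ still exists and must be processed. A tiny sketch of the difference, using a plain counter in place of the driver's queue arrays:

#include <stdio.h>

int main(void)
{
	int count = 0;	/* e.g. cfg_fcp_eq_count == 0 in INTx mode */
	int i, visits;

	visits = 0;
	for (i = 0; i < count; i++)
		visits++;
	printf("for loop:      %d iterations\n", visits);	/* 0 */

	visits = 0;
	i = 0;
	do
		visits++;
	while (++i < count);
	printf("do/while loop: %d iterations\n", visits);	/* 1 */
	return 0;
}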
/*
* Set up all the Work Queues (WQs)
@@ -6445,7 +6671,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
fcp_cq_index,
phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
/* Round robin FCP Work Queue's Completion Queue assignment */
- fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
+ if (phba->cfg_fcp_eq_count)
+ fcp_cq_index = ((fcp_cq_index + 1) %
+ phba->cfg_fcp_eq_count);
}
/*
@@ -6827,6 +7055,8 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
if (rdy_chk < 1000)
break;
}
+ /* delay driver action following IF_TYPE_2 function reset */
+ msleep(100);
break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
@@ -7419,11 +7649,15 @@ enable_msix_vectors:
/*
* Assign MSI-X vectors to interrupt handlers
*/
-
- /* The first vector must associated to slow-path handler for MQ */
- rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
- &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
- LPFC_SP_DRIVER_HANDLER_NAME, phba);
+ if (vectors > 1)
+ rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+ &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
+ LPFC_SP_DRIVER_HANDLER_NAME, phba);
+ else
+ /* All Interrupts need to be handled by one EQ */
+ rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+ &lpfc_sli4_intr_handler, IRQF_SHARED,
+ LPFC_DRIVER_NAME, phba);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0485 MSI-X slow-path request_irq failed "
@@ -7765,6 +7999,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
int wait_cnt = 0;
LPFC_MBOXQ_t *mboxq;
+ struct pci_dev *pdev = phba->pcidev;
lpfc_stop_hba_timers(phba);
phba->sli4_hba.intr_enable = 0;
@@ -7804,6 +8039,10 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
+ /* Disable SR-IOV if enabled */
+ if (phba->cfg_sriov_nr_virtfn)
+ pci_disable_sriov(pdev);
+
/* Stop kthread signal shall trigger work_done one more time */
kthread_stop(phba->worker_thread);
@@ -7878,6 +8117,11 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+
+ /* Make sure that sge_supp_len can be handled by the driver */
+ if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+ sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
return rc;
}
@@ -7902,6 +8146,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
int length;
struct lpfc_sli4_parameters *mbx_sli4_parameters;
+ /*
+ * By default, the driver assumes the SLI4 port requires RPI
+ * header postings. The SLI4_PARAM response will correct this
+ * assumption.
+ */
+ phba->sli4_hba.rpi_hdrs_in_use = 1;
+
/* Read the port's SLI4 Config Parameters */
length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
sizeof(struct lpfc_sli4_cfg_mhdr));
@@ -7938,6 +8189,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
mbx_sli4_parameters);
sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
mbx_sli4_parameters);
+ phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
+ phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
+
+ /* Make sure that sge_supp_len can be handled by the driver */
+ if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+ sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
return 0;
}
@@ -8173,6 +8431,10 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
lpfc_debugfs_terminate(vport);
+ /* Disable SR-IOV if enabled */
+ if (phba->cfg_sriov_nr_virtfn)
+ pci_disable_sriov(pdev);
+
/* Disable interrupt */
lpfc_sli_disable_intr(phba);
@@ -8565,6 +8827,97 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
}
/**
+ * lpfc_write_firmware - attempt to write a firmware image to the port
+ * @phba: pointer to lpfc hba data structure.
+ * @fw: pointer to firmware image returned from request_firmware.
+ *
+ * returns the number of bytes written if write is successful.
+ * returns a negative error value if there were errors.
+ * returns 0 if firmware matches currently active firmware on port.
+ **/
+int
+lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
+{
+ char fwrev[32];
+ struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
+ struct list_head dma_buffer_list;
+ int i, rc = 0;
+ struct lpfc_dmabuf *dmabuf, *next;
+ uint32_t offset = 0, temp_offset = 0;
+
+ INIT_LIST_HEAD(&dma_buffer_list);
+ if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) ||
+ (bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) ||
+ (bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
+ (image->size != fw->size)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3022 Invalid FW image found. "
+ "Magic:%d Type:%x ID:%x\n",
+ image->magic_number,
+ bf_get(lpfc_grp_hdr_file_type, image),
+ bf_get(lpfc_grp_hdr_id, image));
+ return -EINVAL;
+ }
+ lpfc_decode_firmware_rev(phba, fwrev, 1);
+ if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3023 Updating Firmware. Current Version:%s "
+ "New Version:%s\n",
+ fwrev, image->rev_name);
+ for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
+ GFP_KERNEL);
+ if (!dmabuf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ SLI4_PAGE_SIZE,
+ &dmabuf->phys,
+ GFP_KERNEL);
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ rc = -ENOMEM;
+ goto out;
+ }
+ list_add_tail(&dmabuf->list, &dma_buffer_list);
+ }
+ while (offset < fw->size) {
+ temp_offset = offset;
+ list_for_each_entry(dmabuf, &dma_buffer_list, list) {
+				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
+					memcpy(dmabuf->virt,
+						fw->data + temp_offset,
+						fw->size - temp_offset);
+					temp_offset = fw->size;
+					break;
+				}
+				memcpy(dmabuf->virt, fw->data + temp_offset,
+					SLI4_PAGE_SIZE);
+				temp_offset += SLI4_PAGE_SIZE;
+ }
+ rc = lpfc_wr_object(phba, &dma_buffer_list,
+ (fw->size - offset), &offset);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3024 Firmware update failed. "
+ "%d\n", rc);
+ goto out;
+ }
+ }
+ rc = offset;
+ }
+out:
+ list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
+ list_del(&dmabuf->list);
+ dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
+ return rc;
+}
+
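A stand-alone sketch of the page-chunked copy performed in lpfc_write_firmware(): the image is copied into a small set of page-sized buffers and written out one batch at a time, copying each chunk before advancing the source position. The buffer sizes and the "write" step below are simplified placeholders for the driver's DMA buffers and lpfc_wr_object() (which is passed &offset and is expected to advance it):

#include <stdio.h>
#include <string.h>

#define PAGE_SZ		16	/* stand-in for SLI4_PAGE_SIZE */
#define NUM_BUFS	2	/* stand-in for LPFC_MBX_WR_CONFIG_MAX_BDE */

int main(void)
{
	char image[70];			/* stand-in firmware image */
	char bufs[NUM_BUFS][PAGE_SZ];	/* stand-in DMA buffers */
	size_t size = sizeof(image), offset = 0, temp, i;

	memset(image, 'x', sizeof(image));
	while (offset < size) {
		temp = offset;
		for (i = 0; i < NUM_BUFS && temp < size; i++) {
			size_t chunk = size - temp < PAGE_SZ ? size - temp : PAGE_SZ;

			/* copy first, then advance the source position */
			memcpy(bufs[i], image + temp, chunk);
			temp += chunk;
		}
		/* the driver would issue lpfc_wr_object() here */
		offset = temp;
		printf("wrote batch up to offset %zu of %zu\n", offset, size);
	}
	return 0;
}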
+/**
* lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
* @pdev: pointer to PCI device
* @pid: pointer to PCI device identifier
@@ -8591,6 +8944,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
int error;
uint32_t cfg_mode, intr_mode;
int mcnt;
+ int adjusted_fcp_eq_count;
+ int fcp_qidx;
+ const struct firmware *fw;
+ uint8_t file_name[16];
/* Allocate memory for HBA structure */
phba = lpfc_hba_alloc(pdev);
@@ -8688,11 +9045,25 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
error = -ENODEV;
goto out_free_sysfs_attr;
}
- /* Default to single FCP EQ for non-MSI-X */
+ /* Default to single EQ for non-MSI-X */
if (phba->intr_type != MSIX)
- phba->cfg_fcp_eq_count = 1;
- else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
- phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+ adjusted_fcp_eq_count = 0;
+ else if (phba->sli4_hba.msix_vec_nr <
+ phba->cfg_fcp_eq_count + 1)
+ adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+ else
+ adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
+ /* Free unused EQs */
+ for (fcp_qidx = adjusted_fcp_eq_count;
+ fcp_qidx < phba->cfg_fcp_eq_count;
+ fcp_qidx++) {
+ lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+ /* do not delete the first fcp_cq */
+ if (fcp_qidx)
+ lpfc_sli4_queue_free(
+ phba->sli4_hba.fcp_cq[fcp_qidx]);
+ }
+ phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
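The probe path now clamps the configured FCP EQ count to what the allocated MSI-X vectors can support (one vector stays reserved for the slow path) and frees the EQs/CQs that can no longer be used. The arithmetic in isolation, as a small sketch:

#include <stdio.h>

static int adjust_fcp_eq_count(int msix_vec_nr, int cfg_fcp_eq_count, int intr_is_msix)
{
	if (!intr_is_msix)
		return 0;			/* single EQ handles everything */
	if (msix_vec_nr < cfg_fcp_eq_count + 1)
		return msix_vec_nr - 1;		/* one vector reserved for slow path */
	return cfg_fcp_eq_count;
}

int main(void)
{
	printf("%d\n", adjust_fcp_eq_count(4, 8, 1));	/* 3 */
	printf("%d\n", adjust_fcp_eq_count(9, 8, 1));	/* 8 */
	printf("%d\n", adjust_fcp_eq_count(0, 8, 0));	/* 0: INTx/MSI */
	return 0;
}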
/* Set up SLI-4 HBA */
if (lpfc_sli4_hba_setup(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8731,6 +9102,14 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
+ /* check for firmware upgrade or downgrade */
+ snprintf(file_name, 16, "%s.grp", phba->ModelName);
+ error = request_firmware(&fw, file_name, &phba->pcidev->dev);
+ if (!error) {
+ lpfc_write_firmware(phba, fw);
+ release_firmware(fw);
+ }
+
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
@@ -9498,6 +9877,10 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }
};
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e6ce9033f85e..556767028353 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -610,7 +610,8 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
- mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
/* save address for completion */
pmb->context1 = mp;
@@ -643,9 +644,10 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregDID.did = did;
- if (vpi != 0xffff)
- vpi += phba->vpi_base;
mb->un.varUnregDID.vpi = vpi;
+ if ((vpi != 0xffff) &&
+ (phba->sli_rev == LPFC_SLI_REV4))
+ mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
mb->mbxCommand = MBX_UNREG_D_ID;
mb->mbxOwner = OWN_HOST;
@@ -738,12 +740,10 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRegLogin.rpi = 0;
- if (phba->sli_rev == LPFC_SLI_REV4) {
- mb->un.varRegLogin.rpi = rpi;
- if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
- return 1;
- }
- mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
mb->un.varRegLogin.did = did;
mb->mbxOwner = OWN_HOST;
/* Get a buffer to hold NPorts Service Parameters */
@@ -757,7 +757,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
"rpi x%x\n", vpi, did, rpi);
- return (1);
+ return 1;
}
INIT_LIST_HEAD(&mp->list);
sparam = mp->virt;
@@ -773,7 +773,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
- return (0);
+ return 0;
}
/**
@@ -789,6 +789,9 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
*
* This routine prepares the mailbox command for unregistering remote port
* login.
+ *
+ * For SLI4 ports, the rpi passed to this function must be the physical
+ * rpi value, not the logical index.
**/
void
lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
@@ -799,9 +802,10 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
- mb->un.varUnregLogin.rpi = (uint16_t) rpi;
+ mb->un.varUnregLogin.rpi = rpi;
mb->un.varUnregLogin.rsvd1 = 0;
- mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
mb->mbxCommand = MBX_UNREG_LOGIN;
mb->mbxOwner = OWN_HOST;
@@ -825,9 +829,16 @@ lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_login(phba, vport->vpi,
- vport->vpi + phba->vpi_base, mbox);
- mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000 ;
+ /*
+ * For SLI4 functions, the rpi field is overloaded for
+ * the vport context unreg all. This routine passes
+ * 0 for the rpi field in lpfc_unreg_login for compatibility
+ * with SLI3 and then overrides the rpi field with the
+ * expected value for SLI4.
+ */
+ lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
+ mbox);
+ mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
@@ -865,9 +876,13 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
if ((phba->sli_rev == LPFC_SLI_REV4) &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
mb->un.varRegVpi.upd = 1;
- mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
+
+ mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
mb->un.varRegVpi.sid = vport->fc_myDID;
- mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
+ else
+ mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
sizeof(struct lpfc_name));
mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
@@ -901,10 +916,10 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
- if (phba->sli_rev < LPFC_SLI_REV4)
- mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
- else
- mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev == LPFC_SLI_REV3)
+ mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
+ else if (phba->sli_rev >= LPFC_SLI_REV4)
+ mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
mb->mbxCommand = MBX_UNREG_VPI;
mb->mbxOwner = OWN_HOST;
@@ -1735,12 +1750,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
return length;
}
- /* Setup for the none-embedded mbox command */
+ /* Setup for the non-embedded mbox command */
pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
/* Allocate record for keeping SGE virtual addresses */
- mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
+ mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
GFP_KERNEL);
if (!mbox->sge_array) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
@@ -1790,12 +1805,87 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
/* The sub-header is in DMA memory, which needs endian converstion */
if (cfg_shdr)
lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
- sizeof(union lpfc_sli4_cfg_shdr));
-
+ sizeof(union lpfc_sli4_cfg_shdr));
return alloc_len;
}
/**
+ * lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to an allocated lpfc mbox resource.
+ * @exts_count: the number of extents, if required, to allocate.
+ * @rsrc_type: the resource extent type.
+ * @emb: true if LPFC_SLI4_MBX_EMBED. false if LPFC_SLI4_MBX_NEMBED.
+ *
+ * This routine completes the subcommand header for SLI4 resource extent
+ * mailbox commands. It is called after lpfc_sli4_config. The caller must
+ * pass an allocated mailbox and the attributes required to initialize the
+ * mailbox correctly.
+ *
+ * Return: 0 on success, 1 on failure.
+ **/
+int
+lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
+ uint16_t exts_count, uint16_t rsrc_type, bool emb)
+{
+ uint8_t opcode = 0;
+ struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
+ void *virtaddr = NULL;
+
+ /* Set up SLI4 ioctl command header fields */
+ if (emb == LPFC_SLI4_MBX_NEMBED) {
+ /* Get the first SGE entry from the non-embedded DMA memory */
+ virtaddr = mbox->sge_array->addr[0];
+ if (virtaddr == NULL)
+ return 1;
+ n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+ }
+
+ /*
+ * The resource type is common to all extent Opcodes and resides in the
+ * same position.
+ */
+ if (emb == LPFC_SLI4_MBX_EMBED)
+ bf_set(lpfc_mbx_alloc_rsrc_extents_type,
+ &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
+ rsrc_type);
+ else {
+ /* This is DMA data. Byteswap is required. */
+ bf_set(lpfc_mbx_alloc_rsrc_extents_type,
+ n_rsrc_extnt, rsrc_type);
+ lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
+ &n_rsrc_extnt->word4,
+ sizeof(uint32_t));
+ }
+
+ /* Complete the initialization for the particular Opcode. */
+ opcode = lpfc_sli4_mbox_opcode_get(phba, mbox);
+ switch (opcode) {
+ case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
+ if (emb == LPFC_SLI4_MBX_EMBED)
+ bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
+ &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
+ exts_count);
+ else
+ bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
+ n_rsrc_extnt, exts_count);
+ break;
+ case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
+ case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
+ case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
+ /* Initialization is complete.*/
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2929 Resource Extent Opcode x%x is "
+ "unsupported\n", opcode);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
* lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
* @phba: pointer to lpfc hba data structure.
* @mbox: pointer to lpfc mbox command.
@@ -1939,9 +2029,12 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
bf_set(lpfc_init_vfi_vr, init_vfi, 1);
bf_set(lpfc_init_vfi_vt, init_vfi, 1);
bf_set(lpfc_init_vfi_vp, init_vfi, 1);
- bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
- bf_set(lpfc_init_vpi_vpi, init_vfi, vport->vpi + vport->phba->vpi_base);
- bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
+ bf_set(lpfc_init_vfi_vfi, init_vfi,
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
+ bf_set(lpfc_init_vpi_vpi, init_vfi,
+ vport->phba->vpi_ids[vport->vpi]);
+ bf_set(lpfc_init_vfi_fcfi, init_vfi,
+ vport->phba->fcf.fcfi);
}
/**
@@ -1964,9 +2057,10 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
reg_vfi = &mbox->u.mqe.un.reg_vfi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
- bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
+ bf_set(lpfc_reg_vfi_vfi, reg_vfi,
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
- bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
+ bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
@@ -1997,9 +2091,9 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
- vpi + phba->vpi_base);
+ phba->vpi_ids[vpi]);
bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
- phba->pport->vfi + phba->vfi_base);
+ phba->sli4_hba.vfi_ids[phba->pport->vfi]);
}
/**
@@ -2019,7 +2113,7 @@ lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
- vport->vfi + vport->phba->vfi_base);
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
}
/**
@@ -2131,12 +2225,14 @@ lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
void
lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba = ndlp->phba;
struct lpfc_mbx_resume_rpi *resume_rpi;
memset(mbox, 0, sizeof(*mbox));
resume_rpi = &mbox->u.mqe.un.resume_rpi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
- bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi);
+ bf_set(lpfc_resume_rpi_index, resume_rpi,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
resume_rpi->event_tag = ndlp->phba->fc_eventTag;
}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index cbb48ee8b0bb..10d5b5e41499 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -62,7 +62,6 @@ int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
- int longs;
int i;
if (phba->sli_rev == LPFC_SLI_REV4)
@@ -138,17 +137,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
phba->lpfc_hrb_pool = NULL;
phba->lpfc_drb_pool = NULL;
}
- /* vpi zero is reserved for the physical port so add 1 to max */
- longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
- phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
- if (!phba->vpi_bmask)
- goto fail_free_dbq_pool;
return 0;
-
- fail_free_dbq_pool:
- pci_pool_destroy(phba->lpfc_drb_pool);
- phba->lpfc_drb_pool = NULL;
fail_free_hrb_pool:
pci_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
@@ -191,9 +181,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
int i;
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
- /* Free VPI bitmask memory */
- kfree(phba->vpi_bmask);
-
/* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
if (phba->lpfc_drb_pool)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 0d92d4205ea6..2ddd02f7c603 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -350,11 +350,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
- /*
- * Need to unreg_login if we are already in one of these states and
- * change to NPR state. This will block the port until after the ACC
- * completes and the reg_login is issued and completed.
- */
+ /* no need to reg_login if we are already in one of these states */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
@@ -363,9 +359,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
- lpfc_unreg_rpi(vport, ndlp);
- ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
+ return 1;
}
if ((vport->fc_flag & FC_PT2PT) &&
@@ -657,6 +652,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_rpi(vport, ndlp);
return 0;
}
+
/**
* lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
* @phba : Pointer to lpfc_hba structure.
@@ -1399,8 +1395,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
if (mb->mbxStatus) {
/* RegLogin failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
- "0246 RegLogin failed Data: x%x x%x x%x\n",
- did, mb->mbxStatus, vport->port_state);
+ "0246 RegLogin failed Data: x%x x%x x%x x%x "
+ "x%x\n",
+ did, mb->mbxStatus, vport->port_state,
+ mb->un.varRegLogin.vpi,
+ mb->un.varRegLogin.rpi);
/*
* If RegLogin failed due to lack of HBA resources do not
* retry discovery.
@@ -1424,7 +1423,10 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
return ndlp->nlp_state;
}
- ndlp->nlp_rpi = mb->un.varWords[0];
+ /* SLI4 ports have preallocated logical rpis. */
+ if (vport->phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
+
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
/* Only if we are not a fabric nport do we issue PRLI */
@@ -2025,7 +2027,9 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
MAILBOX_t *mb = &pmb->u.mb;
if (!mb->mbxStatus) {
- ndlp->nlp_rpi = mb->un.varWords[0];
+ /* SLI4 ports have preallocated logical rpis. */
+ if (vport->phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
} else {
if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 84e4481b2406..3ccc97496ebf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -743,7 +743,14 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
if (bcnt == 0)
continue;
/* Now, post the SCSI buffer list sgls as a block */
- status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+ if (!phba->sli4_hba.extents_in_use)
+ status = lpfc_sli4_post_scsi_sgl_block(phba,
+ &sblist,
+ bcnt);
+ else
+ status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
+ &sblist,
+ bcnt);
/* Reset SCSI buffer count for next round of posting */
bcnt = 0;
while (!list_empty(&sblist)) {
@@ -787,7 +794,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
dma_addr_t pdma_phys_fcp_cmd;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
- uint16_t iotag, last_xritag = NO_XRI;
+ uint16_t iotag, last_xritag = NO_XRI, lxri = 0;
int status = 0, index;
int bcnt;
int non_sequential_xri = 0;
@@ -823,13 +830,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
break;
}
- psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
- if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
break;
}
+ psb->cur_iocbq.sli4_lxritag = lxri;
+ psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
if (last_xritag != NO_XRI
&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
non_sequential_xri = 1;
@@ -861,6 +870,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
*/
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
+ sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
@@ -869,6 +879,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
/* Setup the physical region for the FCP RSP */
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
+ sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
@@ -914,7 +925,21 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
}
}
if (bcnt) {
- status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+ if (!phba->sli4_hba.extents_in_use)
+ status = lpfc_sli4_post_scsi_sgl_block(phba,
+ &sblist,
+ bcnt);
+ else
+ status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
+ &sblist,
+ bcnt);
+
+ if (status) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "3021 SCSI SGL post error %d\n",
+ status);
+ bcnt = 0;
+ }
/* Reset SCSI buffer count for next round of posting */
while (!list_empty(&sblist)) {
list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
@@ -2081,6 +2106,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
dma_len = sg_dma_len(sgel);
sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+ sgl->word2 = le32_to_cpu(sgl->word2);
if ((num_bde + 1) == nseg)
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
@@ -2794,6 +2820,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
* of the scsi_cmnd request_buffer
*/
piocbq->iocb.ulpContext = pnode->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ piocbq->iocb.ulpContext =
+ phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
piocbq->iocb.ulpFCP2Rcvy = 1;
else
@@ -2807,7 +2836,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
/**
- * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit
+ * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
* @lun: Logical unit number.
@@ -2851,6 +2880,10 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
piocb->ulpContext = ndlp->nlp_rpi;
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+ piocb->ulpContext =
+ vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ }
if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
piocb->ulpFCP2Rcvy = 1;
}
@@ -3405,9 +3438,10 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0702 Issue %s to TGT %d LUN %d "
- "rpi x%x nlp_flag x%x\n",
+ "rpi x%x nlp_flag x%x Data: x%x x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
- pnode->nlp_rpi, pnode->nlp_flag);
+ pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
+ iocbq->iocb_flag);
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -3419,10 +3453,12 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
ret = FAILED;
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
+ "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
+ "iocb_flag x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd),
tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
- iocbqrsp->iocb.un.ulpWord[4]);
+ iocbqrsp->iocb.un.ulpWord[4],
+ iocbq->iocb_flag);
} else if (status == IOCB_BUSY)
ret = FAILED;
else
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fd5835e1c039..98999bbd8cbf 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -65,6 +65,9 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
struct hbq_dmabuf *);
+static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_cqe *);
+
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
@@ -456,7 +459,6 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
struct lpfc_iocbq * iocbq = NULL;
list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
-
if (iocbq)
phba->iocb_cnt++;
if (phba->iocb_cnt > phba->iocb_max)
@@ -479,13 +481,10 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
- uint16_t adj_xri;
struct lpfc_sglq *sglq;
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
- return NULL;
- sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
- phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
+
+ sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
+ phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
return sglq;
}
@@ -504,12 +503,9 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
- uint16_t adj_xri;
struct lpfc_sglq *sglq;
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
- return NULL;
- sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
+
+ sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
return sglq;
}
@@ -532,7 +528,6 @@ static int
__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
- uint16_t adj_xri;
struct lpfc_node_rrq *rrq;
int empty;
uint32_t did = 0;
@@ -553,21 +548,19 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/*
* set the active bit even if there is no mem available.
*/
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
-
if (NLP_CHK_FREE_REQ(ndlp))
goto out;
if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
goto out;
- if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
+ if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
goto out;
rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
if (rrq) {
rrq->send_rrq = send_rrq;
- rrq->xritag = xritag;
+ rrq->xritag = phba->sli4_hba.xri_ids[xritag];
rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
rrq->ndlp = ndlp;
rrq->nlp_DID = ndlp->nlp_DID;
@@ -603,7 +596,6 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
uint16_t xritag,
struct lpfc_node_rrq *rrq)
{
- uint16_t adj_xri;
struct lpfc_nodelist *ndlp = NULL;
if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
@@ -619,8 +611,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
if (!ndlp)
goto out;
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
+ if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
rrq->send_rrq = 0;
rrq->xritag = 0;
rrq->rrq_stop_time = 0;
@@ -796,12 +787,9 @@ int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag)
{
- uint16_t adj_xri;
-
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
if (!ndlp)
return 0;
- if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
+ if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
return 1;
else
return 0;
@@ -841,7 +829,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
* @piocb: Pointer to the iocbq.
*
* This function is called with hbalock held. This function
- * Gets a new driver sglq object from the sglq list. If the
+ * gets a new driver sglq object from the sglq list. If the
* list is not empty then it is successful, it returns pointer to the newly
* allocated sglq object else it returns NULL.
**/
@@ -851,7 +839,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
struct lpfc_sglq *sglq = NULL;
struct lpfc_sglq *start_sglq = NULL;
- uint16_t adj_xri;
struct lpfc_scsi_buf *lpfc_cmd;
struct lpfc_nodelist *ndlp;
int found = 0;
@@ -870,8 +857,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
while (!found) {
if (!sglq)
return NULL;
- adj_xri = sglq->sli4_xritag -
- phba->sli4_hba.max_cfg_param.xri_base;
if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
/* This xri has an rrq outstanding for this DID.
* put it back in the list and get another xri.
@@ -888,7 +873,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
}
sglq->ndlp = ndlp;
found = 1;
- phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+ phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
sglq->state = SGL_ALLOCATED;
}
return sglq;
@@ -944,7 +929,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
if (iocbq->sli4_xritag == NO_XRI)
sglq = NULL;
else
- sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
+ sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
+
if (sglq) {
if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
(sglq->state != SGL_XRI_ABORTED)) {
@@ -971,6 +957,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
* Clean all volatile data fields, preserve iotag and node struct.
*/
memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+ iocbq->sli4_lxritag = NO_XRI;
iocbq->sli4_xritag = NO_XRI;
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
@@ -2113,7 +2100,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
!pmb->u.mb.mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
- vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
+ vpi = pmb->u.mb.un.varRegLogin.vpi;
lpfc_unreg_login(phba, vpi, rpi, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -3881,8 +3868,10 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
list_del_init(&phba->sli4_hba.els_cq->list);
for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
- for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
+ qindx = 0;
+ do
list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
+ while (++qindx < phba->cfg_fcp_eq_count);
spin_unlock_irq(&phba->hbalock);
/* Now physically reset the device */
@@ -4318,6 +4307,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
continue;
} else if (rc)
break;
+
phba->link_state = LPFC_INIT_MBX_CMDS;
lpfc_config_port(phba, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -4421,7 +4411,8 @@ int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
uint32_t rc;
- int mode = 3;
+ int mode = 3, i;
+ int longs;
switch (lpfc_sli_mode) {
case 2:
@@ -4491,6 +4482,35 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
if (rc)
goto lpfc_sli_hba_setup_error;
+ /* Initialize VPIs. */
+ if (phba->sli_rev == LPFC_SLI_REV3) {
+ /*
+ * The VPI bitmask and physical ID array are allocated
+ * and initialized once only - at driver load. A port
+ * reset doesn't need to reinitialize this memory.
+ */
+ if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
+ longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
+ phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!phba->vpi_bmask) {
+ rc = -ENOMEM;
+ goto lpfc_sli_hba_setup_error;
+ }
+
+ phba->vpi_ids = kzalloc(
+ (phba->max_vpi+1) * sizeof(uint16_t),
+ GFP_KERNEL);
+ if (!phba->vpi_ids) {
+ kfree(phba->vpi_bmask);
+ rc = -ENOMEM;
+ goto lpfc_sli_hba_setup_error;
+ }
+ for (i = 0; i < phba->max_vpi; i++)
+ phba->vpi_ids[i] = i;
+ }
+ }
+
/* Init HBQs */
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
rc = lpfc_sli_hbq_setup(phba);
@@ -4677,9 +4697,11 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
+ fcp_eqidx = 0;
+ do
lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
LPFC_QUEUE_REARM);
+ while (++fcp_eqidx < phba->cfg_fcp_eq_count);
lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
@@ -4687,6 +4709,803 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type.
+ * @extnt_count: buffer to hold the available extent count.
+ * @extnt_size: buffer to hold the element count per extent.
+ *
+ * This function reads the number of available resource extents and the
+ * size of each extent for the given resource type from the port, and
+ * returns them through @extnt_count and @extnt_size.
+ **/
+static int
+lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
+ uint16_t *extnt_count, uint16_t *extnt_size)
+{
+ int rc = 0;
+ uint32_t length;
+ uint32_t mbox_tmo;
+ struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
+ LPFC_MBOXQ_t *mbox;
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /* Find out how many extents are available for this resource type */
+ length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ /* Send an extents count of 0 - the GET doesn't use it. */
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+ LPFC_SLI4_MBX_EMBED);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
+ if (bf_get(lpfc_mbox_hdr_status,
+ &rsrc_info->header.cfg_shdr.response)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "2930 Failed to get resource extents "
+ "Status 0x%x Add'l Status 0x%x\n",
+ bf_get(lpfc_mbox_hdr_status,
+ &rsrc_info->header.cfg_shdr.response),
+ bf_get(lpfc_mbox_hdr_add_status,
+ &rsrc_info->header.cfg_shdr.response));
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
+ &rsrc_info->u.rsp);
+ *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
+ &rsrc_info->u.rsp);
+ err_exit:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ * @type: The extent type to check.
+ *
+ * This function reads the current available extents from the port and checks
+ * if the extent count or extent size has changed since the last access.
+ * Callers use this routine after a port reset to determine whether there is
+ * an extent reprovisioning requirement.
+ *
+ * Returns:
+ * -Error: error indicates problem.
+ * 1: Extent count or size has changed.
+ * 0: No changes.
+ **/
+static int
+lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
+{
+ uint16_t curr_ext_cnt, rsrc_ext_cnt;
+ uint16_t size_diff, rsrc_ext_size;
+ int rc = 0;
+ struct lpfc_rsrc_blks *rsrc_entry;
+ struct list_head *rsrc_blk_list = NULL;
+
+ size_diff = 0;
+ curr_ext_cnt = 0;
+ rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+ &rsrc_ext_cnt,
+ &rsrc_ext_size);
+ if (unlikely(rc))
+ return -EIO;
+
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ rsrc_blk_list = &phba->lpfc_vpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+ break;
+ default:
+ break;
+ }
+
+ list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
+ curr_ext_cnt++;
+ if (rsrc_entry->rsrc_size != rsrc_ext_size)
+ size_diff++;
+ }
+
+ if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
+ rc = 1;
+
+ return rc;
+}
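
Taken on its own, the check above reduces to a small comparison: the driver walks its recorded extent blocks and flags a change if the block count or any block's element size no longer matches what the port just reported. The following is a minimal, self-contained C model of that comparison (made-up structures and values, no mailbox plumbing), offered only as an illustration.

#include <stdbool.h>
#include <stdio.h>

/* One recorded extent block: only its element size matters for the check. */
struct model_rsrc_blk {
	unsigned int rsrc_size;
};

/*
 * Returns true when either the number of recorded blocks or any block's
 * size no longer matches what the port currently reports.
 */
static bool extents_changed(const struct model_rsrc_blk *blks,
			    unsigned int blk_cnt,
			    unsigned int port_cnt, unsigned int port_size)
{
	unsigned int size_diff = 0;

	for (unsigned int i = 0; i < blk_cnt; i++)
		if (blks[i].rsrc_size != port_size)
			size_diff++;

	return blk_cnt != port_cnt || size_diff != 0;
}

int main(void)
{
	struct model_rsrc_blk blks[] = { { 64 }, { 64 } };

	printf("same: %d\n", extents_changed(blks, 2, 2, 64));    /* 0 */
	printf("grew: %d\n", extents_changed(blks, 2, 3, 64));    /* 1 */
	printf("resized: %d\n", extents_changed(blks, 2, 2, 32)); /* 1 */
	return 0;
}
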
+
+/**
+ * lpfc_sli4_cfg_post_extnts - Post the extent allocation request.
+ * @phba: Pointer to HBA context object.
+ * @extnt_cnt: number of available extents.
+ * @type: the extent type (rpi, xri, vfi, vpi).
+ * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
+ * @mbox: pointer to the caller's allocated mailbox structure.
+ *
+ * This function executes the extents allocation request. It also
+ * takes care of the amount of memory needed to allocate or get the
+ * allocated extents. It is the caller's responsibility to evaluate
+ * the response.
+ *
+ * Returns:
+ * -Error: Error value describes the condition found.
+ * 0: if successful
+ **/
+static int
+lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
+ uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
+{
+ int rc = 0;
+ uint32_t req_len;
+ uint32_t emb_len;
+ uint32_t alloc_len, mbox_tmo;
+
+ /* Calculate the total requested length of the dma memory */
+ req_len = *extnt_cnt * sizeof(uint16_t);
+
+ /*
+ * Calculate the size of an embedded mailbox. The uint32_t
+ * accounts for the extents-specific word.
+ */
+ emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
+ sizeof(uint32_t);
+
+ /*
+ * Presume the allocation and response will fit into an embedded
+ * mailbox. If not true, reconfigure to a non-embedded mailbox.
+ */
+ *emb = LPFC_SLI4_MBX_EMBED;
+ if (req_len > emb_len) {
+ req_len = *extnt_cnt * sizeof(uint16_t) +
+ sizeof(union lpfc_sli4_cfg_shdr) +
+ sizeof(uint32_t);
+ *emb = LPFC_SLI4_MBX_NEMBED;
+ }
+
+ alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
+ req_len, *emb);
+ if (alloc_len < req_len) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "9000 Allocated DMA memory size (x%x) is "
+ "less than the requested DMA memory "
+ "size (x%x)\n", alloc_len, req_len);
+ return -ENOMEM;
+ }
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb);
+ if (unlikely(rc))
+ return -EIO;
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+
+ if (unlikely(rc))
+ rc = -EIO;
+ return rc;
+}
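
The embedded-versus-non-embedded decision above is just a size comparison: the id payload for the requested extent count either fits in the space left inside the mailbox after its header and the extents-specific word, or the request is rebuilt as a non-embedded command that also carries its own config header. A small userspace model of that arithmetic is sketched below; the MODEL_* constants are placeholders, not the real MAILBOX_t or SLI4 header sizes.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder sizes; the real values come from MAILBOX_t and the SLI4 headers. */
#define MODEL_MAILBOX_SIZE   256u
#define MODEL_MBOX_HDR_SIZE   24u
#define MODEL_CFG_SHDR_SIZE   16u

/* Returns true when the id payload for @extnt_cnt extents fits embedded. */
static bool fits_embedded(uint16_t extnt_cnt, uint32_t *req_len)
{
	uint32_t emb_len = MODEL_MAILBOX_SIZE - MODEL_MBOX_HDR_SIZE -
			   sizeof(uint32_t);	/* extents-specific word */

	*req_len = extnt_cnt * sizeof(uint16_t);
	if (*req_len <= emb_len)
		return true;

	/* Non-embedded: the request also carries its own config header. */
	*req_len = extnt_cnt * sizeof(uint16_t) +
		   MODEL_CFG_SHDR_SIZE + sizeof(uint32_t);
	return false;
}

int main(void)
{
	uint32_t len;

	for (unsigned int cnt = 16; cnt <= 256; cnt *= 2)
		printf("%3u extents -> %s, req_len %u\n", cnt,
		       fits_embedded((uint16_t)cnt, &len) ?
		       "embedded" : "non-embedded", (unsigned)len);
	return 0;
}
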
+
+/**
+ * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type to allocate.
+ *
+ * This function allocates the number of elements for the specified
+ * resource type.
+ **/
+static int
+lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+ bool emb = false;
+ uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
+ uint16_t rsrc_id, rsrc_start, j, k;
+ uint16_t *ids;
+ int i, rc;
+ unsigned long longs;
+ unsigned long *bmask;
+ struct lpfc_rsrc_blks *rsrc_blks;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t length;
+ struct lpfc_id_range *id_array = NULL;
+ void *virtaddr = NULL;
+ struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
+ struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
+ struct list_head *ext_blk_list;
+
+ rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+ &rsrc_cnt,
+ &rsrc_size);
+ if (unlikely(rc))
+ return -EIO;
+
+ if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "3009 No available Resource Extents "
+ "for resource type 0x%x: Count: 0x%x, "
+ "Size 0x%x\n", type, rsrc_cnt,
+ rsrc_size);
+ return -ENOMEM;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT,
+ "2903 Available Resource Extents "
+ "for resource type 0x%x: Count: 0x%x, "
+ "Size 0x%x\n", type, rsrc_cnt,
+ rsrc_size);
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ /*
+ * Figure out where the response is located. Then get local pointers
+ * to the response data. The port is not guaranteed to grant all of the
+ * extents requested, so update the local variable with the count
+ * actually allocated by the port.
+ */
+ if (emb == LPFC_SLI4_MBX_EMBED) {
+ rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
+ id_array = &rsrc_ext->u.rsp.id[0];
+ rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
+ } else {
+ virtaddr = mbox->sge_array->addr[0];
+ n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+ rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
+ id_array = &n_rsrc->id;
+ }
+
+ longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ rsrc_id_cnt = rsrc_cnt * rsrc_size;
+
+ /*
+ * Based on the resource size and count, correct the base and max
+ * resource values.
+ */
+ length = sizeof(struct lpfc_rsrc_blks);
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ phba->sli4_hba.rpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_ids)) {
+ kfree(phba->sli4_hba.rpi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /*
+ * The next_rpi was initialized with the maximum available
+ * count but the port may allocate a smaller number. Catch
+ * that case and update the next_rpi.
+ */
+ phba->sli4_hba.next_rpi = rsrc_id_cnt;
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.rpi_bmask;
+ ids = phba->sli4_hba.rpi_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ phba->vpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->vpi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_ids)) {
+ kfree(phba->vpi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->vpi_bmask;
+ ids = phba->vpi_ids;
+ ext_blk_list = &phba->lpfc_vpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ phba->sli4_hba.xri_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_ids)) {
+ kfree(phba->sli4_hba.xri_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.xri_bmask;
+ ids = phba->sli4_hba.xri_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ phba->sli4_hba.vfi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_ids)) {
+ kfree(phba->sli4_hba.vfi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.vfi_bmask;
+ ids = phba->sli4_hba.vfi_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+ break;
+ default:
+ /* Unsupported Opcode. Fail call. */
+ id_array = NULL;
+ bmask = NULL;
+ ids = NULL;
+ ext_blk_list = NULL;
+ goto err_exit;
+ }
+
+ /*
+ * Complete initializing the extent configuration with the
+ * allocated ids assigned to this function. The bitmask serves
+ * as an index into the array and manages the available ids. The
+ * array just stores the ids communicated to the port via the wqes.
+ */
+ for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
+ if ((i % 2) == 0)
+ rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
+ &id_array[k]);
+ else
+ rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
+ &id_array[k]);
+
+ rsrc_blks = kzalloc(length, GFP_KERNEL);
+ if (unlikely(!rsrc_blks)) {
+ rc = -ENOMEM;
+ kfree(bmask);
+ kfree(ids);
+ goto err_exit;
+ }
+ rsrc_blks->rsrc_start = rsrc_id;
+ rsrc_blks->rsrc_size = rsrc_size;
+ list_add_tail(&rsrc_blks->list, ext_blk_list);
+ rsrc_start = rsrc_id;
+ if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
+ phba->sli4_hba.scsi_xri_start = rsrc_start +
+ lpfc_sli4_get_els_iocb_cnt(phba);
+
+ while (rsrc_id < (rsrc_start + rsrc_size)) {
+ ids[j] = rsrc_id;
+ rsrc_id++;
+ j++;
+ }
+ /* Entire word processed. Get next word.*/
+ if ((i % 2) == 1)
+ k++;
+ }
+ err_exit:
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return rc;
+}
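
The assignment loop above establishes the mapping the rest of this patch leans on: a zero-based bitmask tracks which logical slots are busy, while ids[logical] holds the physical identifier the port granted, built from one contiguous run per extent. The snippet below is a self-contained model of that indexing scheme using hypothetical extent ranges; it is an illustration, not driver code.

#include <stdint.h>
#include <stdio.h>

#define MODEL_MAX_IDS 64u
#define LONG_BITS (8 * sizeof(unsigned long))

static unsigned long bmask[(MODEL_MAX_IDS + LONG_BITS - 1) / LONG_BITS];
static uint16_t ids[MODEL_MAX_IDS];	/* logical index -> physical id */
static unsigned int id_count;

/* Append one extent: @size contiguous physical ids starting at @start. */
static void add_extent(uint16_t start, uint16_t size)
{
	for (uint16_t p = start; p < start + size && id_count < MODEL_MAX_IDS; p++)
		ids[id_count++] = p;
}

/* Claim the next free logical slot; return its physical id, or -1 if full. */
static int alloc_id(void)
{
	for (unsigned int i = 0; i < id_count; i++) {
		unsigned long *word = &bmask[i / LONG_BITS];
		unsigned long bit = 1UL << (i % LONG_BITS);

		if (!(*word & bit)) {
			*word |= bit;
			return ids[i];	/* logical i maps to this physical id */
		}
	}
	return -1;
}

int main(void)
{
	/* Two hypothetical extents the port might have granted. */
	add_extent(0x80, 4);
	add_extent(0x200, 4);

	for (int i = 0; i < 6; i++)
		printf("logical %d -> physical 0x%x\n", i, alloc_id());
	return 0;
}
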
+
+/**
+ * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type: the extent's type.
+ *
+ * This function deallocates all extents of a particular resource type.
+ * SLI4 does not allow for deallocating a particular extent range. It
+ * is the caller's responsibility to release all kernel memory resources.
+ **/
+static int
+lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+ int rc;
+ uint32_t length, mbox_tmo = 0;
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
+ struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /*
+ * This function sends an embedded mailbox because it only sends the
+ * resource type. All extents of this type are released by the
+ * port.
+ */
+ length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ /* Send an extents count of 0 - the dealloc doesn't use it. */
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+ LPFC_SLI4_MBX_EMBED);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+
+ dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
+ if (bf_get(lpfc_mbox_hdr_status,
+ &dealloc_rsrc->header.cfg_shdr.response)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "2919 Failed to release resource extents "
+ "for type %d - Status 0x%x Add'l Status 0x%x. "
+ "Resource memory not released.\n",
+ type,
+ bf_get(lpfc_mbox_hdr_status,
+ &dealloc_rsrc->header.cfg_shdr.response),
+ bf_get(lpfc_mbox_hdr_add_status,
+ &dealloc_rsrc->header.cfg_shdr.response));
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+
+ /* Release kernel memory resources for the specific type. */
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ kfree(phba->vpi_bmask);
+ kfree(phba->vpi_ids);
+ bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->lpfc_vpi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ kfree(phba->sli4_hba.xri_bmask);
+ kfree(phba->sli4_hba.xri_ids);
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_xri_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ kfree(phba->sli4_hba.vfi_bmask);
+ kfree(phba->sli4_hba.vfi_ids);
+ bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_vfi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ /* RPI bitmask and physical id array are cleaned up earlier. */
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_rpi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ default:
+ break;
+ }
+
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+
+ out_free_mbox:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function allocates all SLI4 resource identifiers.
+ **/
+int
+lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
+{
+ int i, rc, error = 0;
+ uint16_t count, base;
+ unsigned long longs;
+
+ if (phba->sli4_hba.extents_in_use) {
+ /*
+ * The port supports resource extents. The XRI, VPI, VFI, RPI
+ * resource extent count must be read and allocated before
+ * provisioning the resource id arrays.
+ */
+ if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+ LPFC_IDX_RSRC_RDY) {
+ /*
+ * Extent-based resources are set - the driver could
+ * be in a port reset. Figure out if any corrective
+ * actions need to be taken.
+ */
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_VFI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_VPI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_XRI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_RPI);
+ if (rc != 0)
+ error++;
+
+ /*
+ * It's possible that the number of resources
+ * provided to this port instance changed between
+ * resets. Detect this condition and reallocate
+ * resources. Otherwise, there is no action.
+ */
+ if (error) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_MBOX | LOG_INIT,
+ "2931 Detected extent resource "
+ "change. Reallocating all "
+ "extents.\n");
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_VFI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_VPI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_XRI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_RPI);
+ } else
+ return 0;
+ }
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+ if (unlikely(rc))
+ goto err_exit;
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_IDX_RSRC_RDY);
+ return rc;
+ } else {
+ /*
+ * The port does not support resource extents. The XRI, VPI,
+ * VFI, RPI resource ids were determined from READ_CONFIG.
+ * Just allocate the bitmasks and provision the resource id
+ * arrays. If a port reset is active, the resources don't
+ * need any action - just exit.
+ */
+ if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+ LPFC_IDX_RSRC_RDY)
+ return 0;
+
+ /* RPIs. */
+ count = phba->sli4_hba.max_cfg_param.max_rpi;
+ base = phba->sli4_hba.max_cfg_param.rpi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.rpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.rpi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_ids)) {
+ rc = -ENOMEM;
+ goto free_rpi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.rpi_ids[i] = base + i;
+
+ /* VPIs. */
+ count = phba->sli4_hba.max_cfg_param.max_vpi;
+ base = phba->sli4_hba.max_cfg_param.vpi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->vpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_bmask)) {
+ rc = -ENOMEM;
+ goto free_rpi_ids;
+ }
+ phba->vpi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_ids)) {
+ rc = -ENOMEM;
+ goto free_vpi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->vpi_ids[i] = base + i;
+
+ /* XRIs. */
+ count = phba->sli4_hba.max_cfg_param.max_xri;
+ base = phba->sli4_hba.max_cfg_param.xri_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.xri_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_bmask)) {
+ rc = -ENOMEM;
+ goto free_vpi_ids;
+ }
+ phba->sli4_hba.xri_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_ids)) {
+ rc = -ENOMEM;
+ goto free_xri_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.xri_ids[i] = base + i;
+
+ /* VFIs. */
+ count = phba->sli4_hba.max_cfg_param.max_vfi;
+ base = phba->sli4_hba.max_cfg_param.vfi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.vfi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+ rc = -ENOMEM;
+ goto free_xri_ids;
+ }
+ phba->sli4_hba.vfi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_ids)) {
+ rc = -ENOMEM;
+ goto free_vfi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.vfi_ids[i] = base + i;
+
+ /*
+ * Mark all resources ready. An HBA reset doesn't need
+ * to reset the initialization.
+ */
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_IDX_RSRC_RDY);
+ return 0;
+ }
+
+ free_vfi_bmask:
+ kfree(phba->sli4_hba.vfi_bmask);
+ free_xri_ids:
+ kfree(phba->sli4_hba.xri_ids);
+ free_xri_bmask:
+ kfree(phba->sli4_hba.xri_bmask);
+ free_vpi_ids:
+ kfree(phba->vpi_ids);
+ free_vpi_bmask:
+ kfree(phba->vpi_bmask);
+ free_rpi_ids:
+ kfree(phba->sli4_hba.rpi_ids);
+ free_rpi_bmask:
+ kfree(phba->sli4_hba.rpi_bmask);
+ err_exit:
+ return rc;
+}
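
For ports without extents, the provisioning above is the degenerate case of the same idea: the bitmask stays zero based and ids[i] is simply base + i, so callers translate logical indexes identically in both modes. A tiny sketch of that case, with made-up base and count values, follows.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define LONG_BITS (8 * sizeof(unsigned long))

int main(void)
{
	/* Hypothetical READ_CONFIG values, not real hardware limits. */
	const uint16_t base = 0x100;
	const uint16_t count = 40;

	size_t longs = (count + LONG_BITS - 1) / LONG_BITS;
	unsigned long *bmask = calloc(longs, sizeof(unsigned long));
	uint16_t *ids = calloc(count, sizeof(uint16_t));

	assert(bmask && ids);
	for (uint16_t i = 0; i < count; i++)
		ids[i] = base + i;	/* logical i -> physical base + i */

	/* Logical index 7 translates to physical id base + 7 in this mode. */
	assert(ids[7] == base + 7);

	free(ids);
	free(bmask);
	return 0;
}
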
+
+/**
+ * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function releases all SLI4 resource identifiers: extents are
+ * deallocated through the port when in use, otherwise the driver-owned
+ * bitmasks and id arrays are freed.
+ **/
+int
+lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
+{
+ if (phba->sli4_hba.extents_in_use) {
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+ } else {
+ kfree(phba->vpi_bmask);
+ kfree(phba->vpi_ids);
+ bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ kfree(phba->sli4_hba.xri_bmask);
+ kfree(phba->sli4_hba.xri_ids);
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ kfree(phba->sli4_hba.vfi_bmask);
+ kfree(phba->sli4_hba.vfi_ids);
+ bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ }
+
+ return 0;
+}
+
+/**
* lpfc_sli4_hba_setup - SLI4 device intialization PCI function
* @phba: Pointer to HBA context object.
*
@@ -4708,10 +5527,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
struct lpfc_vport *vport = phba->pport;
struct lpfc_dmabuf *mp;
- /*
- * TODO: Why does this routine execute these task in a different
- * order from probe?
- */
/* Perform a PCI function reset to start from clean */
rc = lpfc_pci_function_reset(phba);
if (unlikely(rc))
@@ -4740,7 +5555,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
* to read FCoE param config regions
*/
if (lpfc_sli4_read_fcoe_params(phba, mboxq))
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
"2570 Failed to read FCoE parameters\n");
/* Issue READ_REV to collect vpd and FW information. */
@@ -4873,6 +5688,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
spin_unlock_irq(&phba->hbalock);
+ /*
+ * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
+ * calls depend on these resources to complete port setup.
+ */
+ rc = lpfc_sli4_alloc_resource_identifiers(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "2920 Failed to alloc Resource IDs "
+ "rc = x%x\n", rc);
+ goto out_free_mbox;
+ }
+
/* Read the port's service parameters. */
rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
if (rc) {
@@ -4906,35 +5733,37 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_mbox;
}
- if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
- memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof(struct lpfc_name));
+ lpfc_update_vport_wwn(vport);
/* Update the fc_host data structures with new wwn. */
fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
/* Register SGL pool to the device using non-embedded mailbox command */
- rc = lpfc_sli4_post_sgl_list(phba);
- if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "0582 Error %d during sgl post operation\n",
- rc);
- rc = -ENODEV;
- goto out_free_mbox;
+ if (!phba->sli4_hba.extents_in_use) {
+ rc = lpfc_sli4_post_els_sgl_list(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0582 Error %d during els sgl post "
+ "operation\n", rc);
+ rc = -ENODEV;
+ goto out_free_mbox;
+ }
+ } else {
+ rc = lpfc_sli4_post_els_sgl_list_ext(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "2560 Error %d during els sgl post "
+ "operation\n", rc);
+ rc = -ENODEV;
+ goto out_free_mbox;
+ }
}
/* Register SCSI SGL pool to the device */
rc = lpfc_sli4_repost_scsi_sgl_list(phba);
if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0383 Error %d during scsi sgl post "
"operation\n", rc);
/* Some Scsi buffers were moved to the abort scsi list */
@@ -5747,10 +6576,15 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
sizeof(struct lpfc_mcqe));
mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
-
- /* Prefix the mailbox status with range x4000 to note SLI4 status. */
+ /*
+ * When the CQE status indicates a failure and the mailbox status
+ * indicates success then copy the CQE status into the mailbox status
+ * (and prefix it with x4000).
+ */
if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
- bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
+ if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
+ bf_set(lpfc_mqe_status, mb,
+ (LPFC_MBX_ERROR_RANGE | mcqe_status));
rc = MBXERR_ERROR;
} else
lpfc_sli4_swap_str(phba, mboxq);
@@ -5819,7 +6653,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
else
rc = -EIO;
if (rc != MBX_SUCCESS)
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
"(%d):2541 Mailbox command x%x "
"(x%x) cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
@@ -6307,6 +7141,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
sgl->addr_hi = bpl->addrHigh;
sgl->addr_lo = bpl->addrLow;
+ sgl->word2 = le32_to_cpu(sgl->word2);
if ((i+1) == numBdes)
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
@@ -6343,6 +7178,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
sgl->addr_lo =
cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
+ sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len =
@@ -6474,7 +7310,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
>> LPFC_FIP_ELS_ID_SHIFT);
}
- bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, ndlp->nlp_rpi);
+ bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
@@ -6623,14 +7460,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
iocbq->iocb.ulpContext);
if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
- iocbq->vport->vpi + phba->vpi_base);
+ phba->vpi_ids[iocbq->vport->vpi]);
bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
LPFC_WQE_LENLOC_WORD3);
bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
- bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, ndlp->nlp_rpi);
+ bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
command_type = OTHER_COMMAND;
break;
case CMD_CLOSE_XRI_CN:
@@ -6729,6 +7567,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
return IOCB_ERROR;
break;
}
+
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
wqe->generic.wqe_com.abort_tag = abort_tag;
@@ -6776,7 +7615,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return IOCB_BUSY;
}
} else {
- sglq = __lpfc_sli_get_sglq(phba, piocb);
+ sglq = __lpfc_sli_get_sglq(phba, piocb);
if (!sglq) {
if (!(flag & SLI_IOCB_RET_IOCB)) {
__lpfc_sli_ringtx_put(phba,
@@ -6789,11 +7628,11 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
}
}
} else if (piocb->iocb_flag & LPFC_IO_FCP) {
- sglq = NULL; /* These IO's already have an XRI and
- * a mapped sgl.
- */
+ /* These IO's already have an XRI and a mapped sgl. */
+ sglq = NULL;
} else {
- /* This is a continuation of a commandi,(CX) so this
+ /*
+ * This is a continuation of a command (CX), so this
* sglq is on the active list
*/
sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
@@ -6802,8 +7641,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
}
if (sglq) {
+ piocb->sli4_lxritag = sglq->sli4_lxritag;
piocb->sli4_xritag = sglq->sli4_xritag;
-
if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
return IOCB_ERROR;
}
@@ -9799,7 +10638,12 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
break;
case LPFC_WCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
- workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
+ if (cq->subtype == LPFC_FCP)
+ workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
+ cqe);
+ else
+ workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
+ cqe);
if (!(++ecount % LPFC_GET_QE_REL_INT))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
@@ -11446,6 +12290,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
LPFC_MBOXQ_t *mbox;
int rc;
uint32_t shdr_status, shdr_add_status;
+ uint32_t mbox_tmo;
union lpfc_sli4_cfg_shdr *shdr;
if (xritag == NO_XRI) {
@@ -11479,8 +12324,10 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
- else
- rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -11498,6 +12345,76 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_alloc_xri - Allocate an available logical xri.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine allocates the next free logical xri from the driver's
+ * zero-based xri bitmask and updates the used counters. The caller
+ * translates the logical index to a physical xri through the xri_ids
+ * array before handing it to the port.
+ *
+ * Return codes
+ * 	the allocated logical xri - successful
+ * 	NO_XRI - no xri available.
+ */
+uint16_t
+lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
+{
+ unsigned long xri;
+
+ /*
+ * Fetch the next logical xri. Because this index is logical,
+ * the driver starts at 0 each time.
+ */
+ spin_lock_irq(&phba->hbalock);
+ xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
+ phba->sli4_hba.max_cfg_param.max_xri, 0);
+ if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
+ spin_unlock_irq(&phba->hbalock);
+ return NO_XRI;
+ } else {
+ set_bit(xri, phba->sli4_hba.xri_bmask);
+ phba->sli4_hba.max_cfg_param.xri_used++;
+ phba->sli4_hba.xri_count++;
+ }
+
+ spin_unlock_irq(&phba->hbalock);
+ return xri;
+}
+
+/**
+ * __lpfc_sli4_free_xri - Release an xri for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @xri: xri to release.
+ *
+ * This routine is invoked to release an xri to the pool of
+ * available xris maintained by the driver. The caller must hold the hbalock.
+ **/
+void
+__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+ if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
+ phba->sli4_hba.xri_count--;
+ phba->sli4_hba.max_cfg_param.xri_used--;
+ }
+}
+
+/**
+ * lpfc_sli4_free_xri - Release an xri for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @xri: xri to release.
+ *
+ * This routine is invoked to release an xri to the pool of
+ * available xris maintained by the driver.
+ **/
+void
+lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+ spin_lock_irq(&phba->hbalock);
+ __lpfc_sli4_free_xri(phba, xri);
+ spin_unlock_irq(&phba->hbalock);
+}
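
Together with the release helper above, the allocator forms a simple find-first-zero / test-and-clear pair over the logical bitmask, with the used counters kept in step. The standalone sketch below models only that pairing (no locking, placeholder sizes); it is an illustration, not the driver's code.

#include <limits.h>
#include <stdio.h>

#define MODEL_MAX_XRI 128u
#define LONG_BITS (CHAR_BIT * sizeof(unsigned long))

static unsigned long xri_bmask[(MODEL_MAX_XRI + LONG_BITS - 1) / LONG_BITS];
static unsigned int xri_used;

/* Model of the allocator: the first zero bit becomes the logical xri. */
static int model_alloc_xri(void)
{
	for (unsigned int i = 0; i < MODEL_MAX_XRI; i++) {
		unsigned long *w = &xri_bmask[i / LONG_BITS];
		unsigned long bit = 1UL << (i % LONG_BITS);

		if (!(*w & bit)) {
			*w |= bit;
			xri_used++;
			return (int)i;
		}
	}
	return -1;	/* stands in for NO_XRI */
}

/* Model of the release path: only adjust accounting if the bit was set. */
static void model_free_xri(unsigned int xri)
{
	unsigned long *w = &xri_bmask[xri / LONG_BITS];
	unsigned long bit = 1UL << (xri % LONG_BITS);

	if (*w & bit) {
		*w &= ~bit;
		xri_used--;
	}
}

int main(void)
{
	int a = model_alloc_xri();	/* logical 0 */
	int b = model_alloc_xri();	/* logical 1 */
	int c;

	model_free_xri((unsigned int)a);
	c = model_alloc_xri();		/* reuses logical 0 */
	printf("a=%d b=%d c=%d used=%u\n", a, b, c, xri_used);
	return 0;
}
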
+
+/**
* lpfc_sli4_next_xritag - Get an xritag for the io
* @phba: Pointer to HBA context object.
*
@@ -11510,30 +12427,23 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
- uint16_t xritag;
+ uint16_t xri_index;
- spin_lock_irq(&phba->hbalock);
- xritag = phba->sli4_hba.next_xri;
- if ((xritag != (uint16_t) -1) && xritag <
- (phba->sli4_hba.max_cfg_param.max_xri
- + phba->sli4_hba.max_cfg_param.xri_base)) {
- phba->sli4_hba.next_xri++;
- phba->sli4_hba.max_cfg_param.xri_used++;
- spin_unlock_irq(&phba->hbalock);
- return xritag;
- }
- spin_unlock_irq(&phba->hbalock);
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ xri_index = lpfc_sli4_alloc_xri(phba);
+ if (xri_index != NO_XRI)
+ return xri_index;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2004 Failed to allocate XRI.last XRITAG is %d"
" Max XRI is %d, Used XRI is %d\n",
- phba->sli4_hba.next_xri,
+ xri_index,
phba->sli4_hba.max_cfg_param.max_xri,
phba->sli4_hba.max_cfg_param.xri_used);
- return -1;
+ return NO_XRI;
}
/**
- * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
+ * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to post a block of driver's sgl pages to the
@@ -11542,7 +12452,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
* stopped.
**/
int
-lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
+lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
{
struct lpfc_sglq *sglq_entry;
struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
@@ -11551,7 +12461,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mbox;
uint32_t reqlen, alloclen, pg_pairs;
uint32_t mbox_tmo;
- uint16_t xritag_start = 0;
+ uint16_t xritag_start = 0, lxri = 0;
int els_xri_cnt, rc = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
@@ -11568,11 +12478,8 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
return -ENOMEM;
}
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2560 Failed to allocate mbox cmd memory\n");
+ if (!mbox)
return -ENOMEM;
- }
/* Allocate DMA memory and set up the non-embedded mailbox command */
alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -11587,15 +12494,30 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
lpfc_sli4_mbox_cmd_free(phba, mbox);
return -ENOMEM;
}
- /* Get the first SGE entry from the non-embedded DMA memory */
- viraddr = mbox->sge_array->addr[0];
-
/* Set up the SGL pages in the non-embedded DMA pages */
+ viraddr = mbox->sge_array->addr[0];
sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
sgl_pg_pairs = &sgl->sgl_pg_pairs;
for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
+
+ /*
+ * Assign the sglq a physical xri only if the driver has not
+ * initialized those resources. A port reset only needs
+ * the sglqs posted.
+ */
+ if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
+ LPFC_XRI_RSRC_RDY) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+ sglq_entry->sli4_lxritag = lxri;
+ sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ }
+
/* Set up the sge entry */
sgl_pg_pairs->sgl_pg0_addr_lo =
cpu_to_le32(putPaddrLow(sglq_entry->phys));
@@ -11605,16 +12527,17 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
cpu_to_le32(putPaddrLow(0));
sgl_pg_pairs->sgl_pg1_addr_hi =
cpu_to_le32(putPaddrHigh(0));
+
/* Keep the first xritag on the list */
if (pg_pairs == 0)
xritag_start = sglq_entry->sli4_xritag;
sgl_pg_pairs++;
}
+
+ /* Complete initialization and perform endian conversion. */
bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
- /* Perform endian conversion if necessary */
sgl->word0 = cpu_to_le32(sgl->word0);
-
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
@@ -11633,6 +12556,181 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
shdr_status, shdr_add_status, rc);
rc = -ENXIO;
}
+
+ if (rc == 0)
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_XRI_RSRC_RDY);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post a block of driver's sgl pages to the
+ * HBA using non-embedded mailbox command. No Lock is held. This routine
+ * is only called when the driver is loading and after all IO has been
+ * stopped.
+ **/
+int
+lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
+{
+ struct lpfc_sglq *sglq_entry;
+ struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+ struct sgl_page_pairs *sgl_pg_pairs;
+ void *viraddr;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t reqlen, alloclen, index;
+ uint32_t mbox_tmo;
+ uint16_t rsrc_start, rsrc_size, els_xri_cnt;
+ uint16_t xritag_start = 0, lxri = 0;
+ struct lpfc_rsrc_blks *rsrc_blk;
+ int cnt, ttl_cnt, rc = 0;
+ int loop_cnt;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ /* The number of sgls to be posted */
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+
+ reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ if (reqlen > SLI4_PAGE_SIZE) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2989 Block sgl registration required DMA "
+ "size (%d) great than a page\n", reqlen);
+ return -ENOMEM;
+ }
+
+ cnt = 0;
+ ttl_cnt = 0;
+ list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
+ list) {
+ rsrc_start = rsrc_blk->rsrc_start;
+ rsrc_size = rsrc_blk->rsrc_size;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3014 Working ELS Extent start %d, cnt %d\n",
+ rsrc_start, rsrc_size);
+
+ loop_cnt = min(els_xri_cnt, rsrc_size);
+ if (ttl_cnt + loop_cnt >= els_xri_cnt) {
+ loop_cnt = els_xri_cnt - ttl_cnt;
+ ttl_cnt = els_xri_cnt;
+ }
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ /*
+ * Allocate DMA memory and set up the non-embedded mailbox
+ * command.
+ */
+ alloclen = lpfc_sli4_config(phba, mbox,
+ LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+ reqlen, LPFC_SLI4_MBX_NEMBED);
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2987 Allocated DMA memory size (%d) "
+ "is less than the requested DMA memory "
+ "size (%d)\n", alloclen, reqlen);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+
+ /* Set up the SGL pages in the non-embedded DMA pages */
+ viraddr = mbox->sge_array->addr[0];
+ sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+ sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+ /*
+ * The starting resource may not begin at zero. Control
+ * the loop variants via the block resource parameters,
+ * but handle the sge pointers with a zero-based index
+ * that doesn't get reset per loop pass.
+ */
+ for (index = rsrc_start;
+ index < rsrc_start + loop_cnt;
+ index++) {
+ sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
+
+ /*
+ * Assign the sglq a physical xri only if the driver
+ * has not initialized those resources. A port reset
+ * only needs the sglqs posted.
+ */
+ if (bf_get(lpfc_xri_rsrc_rdy,
+ &phba->sli4_hba.sli4_flags) !=
+ LPFC_XRI_RSRC_RDY) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ sglq_entry->sli4_lxritag = lxri;
+ sglq_entry->sli4_xritag =
+ phba->sli4_hba.xri_ids[lxri];
+ }
+
+ /* Set up the sge entry */
+ sgl_pg_pairs->sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(sglq_entry->phys));
+ sgl_pg_pairs->sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(sglq_entry->phys));
+ sgl_pg_pairs->sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(0));
+ sgl_pg_pairs->sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(0));
+
+ /* Track the starting physical XRI for the mailbox. */
+ if (index == rsrc_start)
+ xritag_start = sglq_entry->sli4_xritag;
+ sgl_pg_pairs++;
+ cnt++;
+ }
+
+ /* Complete initialization and perform endian conversion. */
+ rsrc_blk->rsrc_used += loop_cnt;
+ bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
+ sgl->word0 = cpu_to_le32(sgl->word0);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3015 Post ELS Extent SGL, start %d, "
+ "cnt %d, used %d\n",
+ xritag_start, loop_cnt, rsrc_blk->rsrc_used);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status,
+ &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2988 POST_SGL_BLOCK mailbox "
+ "command failed status x%x "
+ "add_status x%x mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ goto err_exit;
+ }
+ if (ttl_cnt >= els_xri_cnt)
+ break;
+ }
+
+ err_exit:
+ if (rc == 0)
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_XRI_RSRC_RDY);
return rc;
}
@@ -11693,6 +12791,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
lpfc_sli4_mbox_cmd_free(phba, mbox);
return -ENOMEM;
}
+
/* Get the first SGE entry from the non-embedded DMA memory */
viraddr = mbox->sge_array->addr[0];
@@ -11748,6 +12847,169 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
}
/**
+ * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ * @sblist: pointer to scsi buffer list.
+ * @cnt: number of scsi buffers on the list.
+ *
+ * This routine is invoked to post a block of @cnt scsi sgl pages from a
+ * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
+ * No Lock is held.
+ *
+ **/
+int
+lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
+ int cnt)
+{
+ struct lpfc_scsi_buf *psb = NULL;
+ struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+ struct sgl_page_pairs *sgl_pg_pairs;
+ void *viraddr;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t reqlen, alloclen, pg_pairs;
+ uint32_t mbox_tmo;
+ uint16_t xri_start = 0, scsi_xri_start;
+ uint16_t rsrc_range;
+ int rc = 0, avail_cnt;
+ uint32_t shdr_status, shdr_add_status;
+ dma_addr_t pdma_phys_bpl1;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_rsrc_blks *rsrc_blk;
+ uint32_t xri_cnt = 0;
+
+ /* Calculate the total requested length of the dma memory */
+ reqlen = cnt * sizeof(struct sgl_page_pairs) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ if (reqlen > SLI4_PAGE_SIZE) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2932 Block sgl registration required DMA "
+ "size (%d) great than a page\n", reqlen);
+ return -ENOMEM;
+ }
+
+ /*
+ * The use of extents requires the driver to post the sgl headers
+ * in multiple postings to meet the contiguous resource assignment.
+ */
+ psb = list_prepare_entry(psb, sblist, list);
+ scsi_xri_start = phba->sli4_hba.scsi_xri_start;
+ list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
+ list) {
+ rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
+ if (rsrc_range < scsi_xri_start)
+ continue;
+ else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
+ continue;
+ else
+ avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
+
+ reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ /*
+ * Allocate DMA memory and set up the non-embedded mailbox
+ * command. The mbox is used to post an SGL page per loop
+ * but the DMA memory has a use-once semantic so the mailbox
+ * is used and freed per loop pass.
+ */
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2933 Failed to allocate mbox cmd "
+ "memory\n");
+ return -ENOMEM;
+ }
+ alloclen = lpfc_sli4_config(phba, mbox,
+ LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+ reqlen,
+ LPFC_SLI4_MBX_NEMBED);
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2934 Allocated DMA memory size (%d) "
+ "is less than the requested DMA memory "
+ "size (%d)\n", alloclen, reqlen);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+
+ /* Get the first SGE entry from the non-embedded DMA memory */
+ viraddr = mbox->sge_array->addr[0];
+
+ /* Set up the SGL pages in the non-embedded DMA pages */
+ sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+ sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+ /* pg_pairs tracks posted SGEs per loop iteration. */
+ pg_pairs = 0;
+ list_for_each_entry_continue(psb, sblist, list) {
+ /* Set up the sge entry */
+ sgl_pg_pairs->sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
+ sgl_pg_pairs->sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
+ if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+ pdma_phys_bpl1 = psb->dma_phys_bpl +
+ SGL_PAGE_SIZE;
+ else
+ pdma_phys_bpl1 = 0;
+ sgl_pg_pairs->sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
+ sgl_pg_pairs->sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
+ /* Keep the first xri for this extent. */
+ if (pg_pairs == 0)
+ xri_start = psb->cur_iocbq.sli4_xritag;
+ sgl_pg_pairs++;
+ pg_pairs++;
+ xri_cnt++;
+
+ /*
+ * Track two exit conditions - the loop has constructed
+ * all of the caller's SGE pairs or all available
+ * resource IDs in this extent are consumed.
+ */
+ if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
+ break;
+ }
+ rsrc_blk->rsrc_used += pg_pairs;
+ bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3016 Post SCSI Extent SGL, start %d, cnt %d "
+ "blk use %d\n",
+ xri_start, pg_pairs, rsrc_blk->rsrc_used);
+ /* Perform endian conversion if necessary */
+ sgl->word0 = cpu_to_le32(sgl->word0);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2935 POST_SGL_BLOCK mailbox command "
+ "failed status x%x add_status x%x "
+ "mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ return -ENXIO;
+ }
+
+ /* Post only what is requested. */
+ if (xri_cnt >= cnt)
+ break;
+ }
+ return rc;
+}
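
Stripped of the mailbox and DMA work, the extent-aware posting above is a chunking loop: each extent block can absorb at most rsrc_size - rsrc_used entries per pass, so the buffer list is carved into per-block postings until either the list or the capacity is exhausted. The short sketch below models just that carving with hypothetical block sizes.

#include <stdio.h>

struct model_blk {
	unsigned int size;	/* ids the extent provides */
	unsigned int used;	/* ids already consumed */
};

int main(void)
{
	/* Hypothetical extent blocks; the real sizes come from the port. */
	struct model_blk blks[] = { { 16, 10 }, { 16, 0 }, { 16, 0 } };
	unsigned int total = 20;	/* buffers left to post */
	unsigned int posted = 0;

	for (unsigned int b = 0;
	     b < sizeof(blks) / sizeof(blks[0]) && posted < total; b++) {
		unsigned int avail = blks[b].size - blks[b].used;
		unsigned int chunk;

		if (!avail)
			continue;
		chunk = total - posted < avail ? total - posted : avail;

		/* One "mailbox posting" would cover this chunk. */
		printf("block %u: post %u entries (block now %u/%u)\n",
		       b, chunk, blks[b].used + chunk, blks[b].size);
		blks[b].used += chunk;
		posted += chunk;
	}
	printf("posted %u of %u\n", posted, total);
	return 0;
}
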
+
+/**
* lpfc_fc_frame_check - Check that this frame is a valid frame to handle
* @phba: pointer to lpfc_hba struct that the frame was received on
* @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
@@ -12137,6 +13399,28 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
+ * @phba: Pointer to HBA context object.
+ * @xri: xri id in transaction.
+ *
+ * This function validates that the xri maps to the known range of XRIs
+ * allocated and used by the driver, and returns the matching logical index.
+ **/
+static uint16_t
+lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
+ uint16_t xri)
+{
+ int i;
+
+ for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
+ if (xri == phba->sli4_hba.xri_ids[i])
+ return i;
+ }
+ return NO_XRI;
+}
+
+
+/**
* lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
* @phba: Pointer to HBA context object.
* @fc_hdr: pointer to a FC frame header.
@@ -12169,9 +13453,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
"SID:x%x\n", oxid, sid);
return;
}
- if (rxid >= phba->sli4_hba.max_cfg_param.xri_base
- && rxid <= (phba->sli4_hba.max_cfg_param.max_xri
- + phba->sli4_hba.max_cfg_param.xri_base))
+ if (lpfc_sli4_xri_inrange(phba, rxid))
lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
/* Allocate buffer for rsp iocb */
@@ -12194,12 +13476,13 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
icmd->ulpBdeCount = 0;
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
- icmd->ulpContext = ndlp->nlp_rpi;
+ icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
ctiocb->context1 = ndlp;
ctiocb->iocb_cmpl = NULL;
ctiocb->vport = phba->pport;
ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+ ctiocb->sli4_lxritag = NO_XRI;
ctiocb->sli4_xritag = NO_XRI;
/* If the oxid maps to the FCP XRI range or if it is out of range,
@@ -12380,8 +13663,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
- first_iocbq->iocb.unsli3.rcvsli3.vpi =
- vport->vpi + vport->phba->vpi_base;
+ /* iocbq is prepped for internal consumption. Logical vpi. */
+ first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi;
/* put the first buffer into the first IOCBq */
first_iocbq->context2 = &seq_dmabuf->dbuf;
first_iocbq->context3 = NULL;
@@ -12461,7 +13744,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
&phba->sli.ring[LPFC_ELS_RING],
iocbq, fc_hdr->fh_r_ctl,
fc_hdr->fh_type))
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2540 Ring %d handler: unexpected Rctl "
"x%x Type x%x received\n",
LPFC_ELS_RING,
@@ -12558,9 +13841,24 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
struct lpfc_rpi_hdr *rpi_page;
uint32_t rc = 0;
+ uint16_t lrpi = 0;
+
+ /* SLI4 ports that support extents do not require RPI headers. */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ goto exit;
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
- /* Post all rpi memory regions to the port. */
list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+ /*
+ * Assign the rpi headers a physical rpi only if the driver
+ * has not initialized those resources. A port reset only
+ * needs the headers posted.
+ */
+ if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
+ LPFC_RPI_RSRC_RDY)
+ rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
+
rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12571,6 +13869,9 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
}
}
+ exit:
+ bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_RPI_RSRC_RDY);
return rc;
}
@@ -12594,10 +13895,15 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
LPFC_MBOXQ_t *mboxq;
struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
uint32_t rc = 0;
- uint32_t mbox_tmo;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* SLI4 ports that support extents do not require RPI headers. */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ return rc;
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
+
/* The port is notified of the header region via a mailbox command. */
mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
@@ -12609,16 +13915,19 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
/* Post all rpi memory regions to the port. */
hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
sizeof(struct lpfc_mbx_post_hdr_tmpl) -
sizeof(struct lpfc_sli4_cfg_mhdr),
LPFC_SLI4_MBX_EMBED);
- bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
- hdr_tmpl, rpi_page->page_count);
+
+
+ /* Post the physical rpi to the port for this rpi header. */
bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
rpi_page->start_rpi);
+ bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
+ hdr_tmpl, rpi_page->page_count);
+
hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -12653,22 +13962,21 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
- int rpi;
- uint16_t max_rpi, rpi_base, rpi_limit;
- uint16_t rpi_remaining;
+ unsigned long rpi;
+ uint16_t max_rpi, rpi_limit;
+ uint16_t rpi_remaining, lrpi = 0;
struct lpfc_rpi_hdr *rpi_hdr;
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
- rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
rpi_limit = phba->sli4_hba.next_rpi;
/*
- * The valid rpi range is not guaranteed to be zero-based. Start
- * the search at the rpi_base as reported by the port.
+ * Fetch the next logical rpi. Because this index is logical,
+ * the driver starts at 0 each time.
*/
spin_lock_irq(&phba->hbalock);
- rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
- if (rpi >= rpi_limit || rpi < rpi_base)
+ rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
+ if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
else {
set_bit(rpi, phba->sli4_hba.rpi_bmask);
@@ -12678,7 +13986,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
/*
* Don't try to allocate more rpi header regions if the device limit
- * on available rpis max has been exhausted.
+ * has been exhausted.
*/
if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
(phba->sli4_hba.rpi_count >= max_rpi)) {
@@ -12687,13 +13995,21 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
}
/*
+ * RPI header postings are not required for SLI4 ports capable of
+ * extents.
+ */
+ if (!phba->sli4_hba.rpi_hdrs_in_use) {
+ spin_unlock_irq(&phba->hbalock);
+ return rpi;
+ }
+
+ /*
* If the driver is running low on rpi resources, allocate another
* page now. Note that the next_rpi value is used because
* it represents how many are actually in use whereas max_rpi notes
* how many are supported max by the device.
*/
- rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
- phba->sli4_hba.rpi_count;
+ rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
spin_unlock_irq(&phba->hbalock);
if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
@@ -12702,6 +14018,8 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
"2002 Error Could not grow rpi "
"count\n");
} else {
+ lrpi = rpi_hdr->start_rpi;
+ rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
}
}
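For readers following the extent rework: the allocator above hands out a logical index from rpi_bmask, and the new rpi_ids[] table translates that index into the physical RPI posted to the port. A stand-alone sketch of that bitmap-plus-lookup pattern (illustrative only, not the driver's code; the table contents are hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	#define RPI_LIMIT 8

	static uint8_t  rpi_bmask[RPI_LIMIT];	/* 1 = logical index in use */
	static uint16_t rpi_ids[RPI_LIMIT] = {	/* logical -> physical RPI */
		4096, 4097, 4098, 4099, 4100, 4101, 4102, 4103
	};

	/* Allocate the next free logical index; return the physical id to post. */
	static int alloc_rpi(void)
	{
		int lrpi;

		for (lrpi = 0; lrpi < RPI_LIMIT; lrpi++) {
			if (!rpi_bmask[lrpi]) {
				rpi_bmask[lrpi] = 1;
				return rpi_ids[lrpi];
			}
		}
		return -1;	/* pool exhausted */
	}

	int main(void)
	{
		printf("first rpi: %d\n", alloc_rpi());		/* 4096 */
		printf("second rpi: %d\n", alloc_rpi());	/* 4097 */
		return 0;
	}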
@@ -12751,6 +14069,8 @@ void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
kfree(phba->sli4_hba.rpi_bmask);
+ kfree(phba->sli4_hba.rpi_ids);
+ bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}
/**
@@ -13490,6 +14810,96 @@ out:
}
/**
+ * lpfc_wr_object - write an object to the firmware
+ * @phba: HBA structure that indicates the port to write the object to.
+ * @dmabuf_list: list of dmabufs to write to the port.
+ * @size: the total size in bytes of the object to write to the port.
+ * @offset: the current offset to be used to start the transfer.
+ *
+ * This routine will create a wr_object mailbox command to send to the port.
+ * The mailbox command will be constructed using the dma buffers described in
+ * @dmabuf_list to create a list of BDEs. This routine will fill in as many
+ * BDEs as the embedded mailbox can support. The @offset variable will be
+ * used to indicate the starting offset of the transfer and will also return
+ * the offset after the write object mailbox has completed. @size is used to
+ * determine the end of the object and whether the eof bit should be set.
+ *
+ * Return 0 if successful; @offset will contain the new offset to use
+ * for the next write.
+ * Return a negative value for error cases.
+ **/
+int
+lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+ uint32_t size, uint32_t *offset)
+{
+ struct lpfc_mbx_wr_object *wr_object;
+ LPFC_MBOXQ_t *mbox;
+ int rc = 0, i = 0;
+ uint32_t shdr_status, shdr_add_status;
+ uint32_t mbox_tmo;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_dmabuf *dmabuf;
+ uint32_t written = 0;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_WRITE_OBJECT,
+ sizeof(struct lpfc_mbx_wr_object) -
+ sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
+
+ wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
+ wr_object->u.request.write_offset = *offset;
+ sprintf((uint8_t *)wr_object->u.request.object_name, "/");
+ wr_object->u.request.object_name[0] =
+ cpu_to_le32(wr_object->u.request.object_name[0]);
+ bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
+ list_for_each_entry(dmabuf, dmabuf_list, list) {
+ if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
+ break;
+ wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
+ wr_object->u.request.bde[i].addrHigh =
+ putPaddrHigh(dmabuf->phys);
+ if (written + SLI4_PAGE_SIZE >= size) {
+ wr_object->u.request.bde[i].tus.f.bdeSize =
+ (size - written);
+ written += (size - written);
+ bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
+ } else {
+ wr_object->u.request.bde[i].tus.f.bdeSize =
+ SLI4_PAGE_SIZE;
+ written += SLI4_PAGE_SIZE;
+ }
+ i++;
+ }
+ wr_object->u.request.bde_count = i;
+ bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3025 Write Object mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ } else
+ *offset += wr_object->u.response.actual_write_length;
+ return rc;
+}
+
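For context, a caller is expected to drive lpfc_wr_object() in a loop, refilling the same DMA buffers with the next chunk of the image and letting @offset advance until the whole object has been written. A hedged sketch of such a loop (illustrative fragment only, not the driver's firmware-download path; dma_buffer_list, fw_data and fw_size are assumed to be set up by the caller):

	/* Assumes:
	 *   - dma_buffer_list: pre-allocated SLI4_PAGE_SIZE lpfc_dmabuf's
	 *   - fw_data/fw_size: the image being written
	 */
	uint32_t offset = 0, copied;
	struct lpfc_dmabuf *dmabuf;
	int rc = 0;

	while (offset < fw_size && !rc) {
		/* Refill the DMA buffers with the next chunk of the image. */
		copied = offset;
		list_for_each_entry(dmabuf, &dma_buffer_list, list) {
			uint32_t len = min(fw_size - copied,
					   (uint32_t)SLI4_PAGE_SIZE);

			memcpy(dmabuf->virt, fw_data + copied, len);
			copied += len;
			if (copied == fw_size)
				break;
		}
		/* lpfc_wr_object() advances *offset past what the port accepted. */
		rc = lpfc_wr_object(phba, &dma_buffer_list,
				    fw_size - offset, &offset);
	}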
+/**
* lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
* @vport: pointer to vport data structure.
*
@@ -13644,7 +15054,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
* never happen
*/
sglq = __lpfc_clear_active_sglq(phba,
- sglq->sli4_xritag);
+ sglq->sli4_lxritag);
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2823 txq empty and txq_cnt is %d\n ",
@@ -13656,6 +15066,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
/* The xri and iocb resources secured,
* attempt to issue request
*/
+ piocbq->sli4_lxritag = sglq->sli4_lxritag;
piocbq->sli4_xritag = sglq->sli4_xritag;
if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
fail_msg = "to convert bpl to sgl";
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 453577c21c14..a0075b0af142 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -52,6 +52,7 @@ struct lpfc_iocbq {
struct list_head clist;
struct list_head dlist;
uint16_t iotag; /* pre-assigned IO tag */
+ uint16_t sli4_lxritag; /* logical pre-assigned XRI. */
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
struct lpfc_cq_event cq_event;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 1a3cbf88f2ce..4b1703554a26 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -310,7 +310,6 @@ struct lpfc_max_cfg_param {
uint16_t vfi_base;
uint16_t vfi_used;
uint16_t max_fcfi;
- uint16_t fcfi_base;
uint16_t fcfi_used;
uint16_t max_eq;
uint16_t max_rq;
@@ -365,6 +364,11 @@ struct lpfc_pc_sli4_params {
uint8_t rqv;
};
+struct lpfc_iov {
+ uint32_t pf_number;
+ uint32_t vf_number;
+};
+
/* SLI4 HBA data structure entries */
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -444,10 +448,13 @@ struct lpfc_sli4_hba {
uint32_t intr_enable;
struct lpfc_bmbx bmbx;
struct lpfc_max_cfg_param max_cfg_param;
+ uint16_t extents_in_use; /* must allocate resource extents. */
+ uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
uint16_t next_rpi;
uint16_t scsi_xri_max;
uint16_t scsi_xri_cnt;
+ uint16_t scsi_xri_start;
struct list_head lpfc_free_sgl_list;
struct list_head lpfc_sgl_list;
struct lpfc_sglq **lpfc_els_sgl_array;
@@ -458,7 +465,17 @@ struct lpfc_sli4_hba {
struct lpfc_sglq **lpfc_sglq_active_list;
struct list_head lpfc_rpi_hdr_list;
unsigned long *rpi_bmask;
+ uint16_t *rpi_ids;
uint16_t rpi_count;
+ struct list_head lpfc_rpi_blk_list;
+ unsigned long *xri_bmask;
+ uint16_t *xri_ids;
+ uint16_t xri_count;
+ struct list_head lpfc_xri_blk_list;
+ unsigned long *vfi_bmask;
+ uint16_t *vfi_ids;
+ uint16_t vfi_count;
+ struct list_head lpfc_vfi_blk_list;
struct lpfc_sli4_flags sli4_flags;
struct list_head sp_queue_event;
struct list_head sp_cqe_event_pool;
@@ -467,6 +484,7 @@ struct lpfc_sli4_hba {
struct list_head sp_els_xri_aborted_work_queue;
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
+ struct lpfc_iov iov;
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
};
@@ -490,6 +508,7 @@ struct lpfc_sglq {
enum lpfc_sgl_state state;
struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
uint16_t iotag; /* pre-assigned IO tag */
+ uint16_t sli4_lxritag; /* logical pre-assigned xri. */
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
struct sli4_sge *sgl; /* pre-assigned SGL */
void *virt; /* virtual address. */
@@ -504,6 +523,13 @@ struct lpfc_rpi_hdr {
uint32_t start_rpi;
};
+struct lpfc_rsrc_blks {
+ struct list_head list;
+ uint16_t rsrc_start;
+ uint16_t rsrc_size;
+ uint16_t rsrc_used;
+};
+
/*
* SLI4 specific function prototypes
*/
@@ -543,8 +569,11 @@ int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
-int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
+int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba);
+int lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba);
int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
+int lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *, struct list_head *,
+ int);
struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 30ba5440c67a..1feb551a57bc 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -83,7 +83,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
static int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
- int vpi;
+ unsigned long vpi;
spin_lock_irq(&phba->hbalock);
/* Start at bit 1 because vpi zero is reserved for the physical port */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 046dcc672ec1..7370c084b178 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.05.34-rc1"
-#define MEGASAS_RELDATE "Feb. 24, 2011"
-#define MEGASAS_EXT_VERSION "Thu. Feb. 24 17:00:00 PDT 2011"
+#define MEGASAS_VERSION "00.00.05.38-rc1"
+#define MEGASAS_RELDATE "May. 11, 2011"
+#define MEGASAS_EXT_VERSION "Wed. May. 11 17:00:00 PDT 2011"
/*
* Device IDs
@@ -76,8 +76,8 @@
#define MFI_STATE_READY 0xB0000000
#define MFI_STATE_OPERATIONAL 0xC0000000
#define MFI_STATE_FAULT 0xF0000000
-#define MFI_RESET_REQUIRED 0x00000001
-
+#define MFI_RESET_REQUIRED 0x00000001
+#define MFI_RESET_ADAPTER 0x00000002
#define MEGAMFI_FRAME_SIZE 64
/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 89c623ebadbc..2d8cdce7b2f5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.05.34-rc1
+ * Version : v00.00.05.38-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -437,15 +437,18 @@ megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
static int
megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
{
- u32 status;
+ u32 status, mfiStatus = 0;
+
/*
* Check if it is our interrupt
*/
status = readl(&regs->outbound_intr_status);
- if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) {
- return 0;
- }
+ if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+ if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
+ mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
/*
* Clear the interrupt by writing back the same value
@@ -455,8 +458,9 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
/* Dummy readl to force pci flush */
readl(&regs->outbound_doorbell_clear);
- return 1;
+ return mfiStatus;
}
+
/**
* megasas_fire_cmd_ppc - Sends command to the FW
* @frame_phys_addr : Physical address of cmd
@@ -477,17 +481,6 @@ megasas_fire_cmd_ppc(struct megasas_instance *instance,
}
/**
- * megasas_adp_reset_ppc - For controller reset
- * @regs: MFI register set
- */
-static int
-megasas_adp_reset_ppc(struct megasas_instance *instance,
- struct megasas_register_set __iomem *regs)
-{
- return 0;
-}
-
-/**
* megasas_check_reset_ppc - For controller reset check
* @regs: MFI register set
*/
@@ -495,8 +488,12 @@ static int
megasas_check_reset_ppc(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ return 1;
+
return 0;
}
+
static struct megasas_instance_template megasas_instance_template_ppc = {
.fire_cmd = megasas_fire_cmd_ppc,
@@ -504,7 +501,7 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
.disable_intr = megasas_disable_intr_ppc,
.clear_intr = megasas_clear_intr_ppc,
.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
- .adp_reset = megasas_adp_reset_ppc,
+ .adp_reset = megasas_adp_reset_xscale,
.check_reset = megasas_check_reset_ppc,
.service_isr = megasas_isr,
.tasklet = megasas_complete_cmd_dpc,
@@ -620,6 +617,9 @@ static int
megasas_check_reset_skinny(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ return 1;
+
return 0;
}
@@ -3454,7 +3454,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
{
u32 max_sectors_1;
u32 max_sectors_2;
- u32 tmp_sectors;
+ u32 tmp_sectors, msix_enable;
struct megasas_register_set __iomem *reg_set;
struct megasas_ctrl_info *ctrl_info;
unsigned long bar_list;
@@ -3507,6 +3507,13 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (megasas_transition_to_ready(instance))
goto fail_ready_state;
+ /* Check if MSI-X is supported while in ready state */
+ msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
+ 0x4000000) >> 0x1a;
+ if (msix_enable && !msix_disable &&
+ !pci_enable_msix(instance->pdev, &instance->msixentry, 1))
+ instance->msi_flag = 1;
+
/* Get operational params, sge flags, send init cmd to controller */
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
@@ -4076,14 +4083,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
- /* Try to enable MSI-X */
- if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) &&
- (instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) &&
- (instance->pdev->device != PCI_DEVICE_ID_LSI_VERDE_ZCR) &&
- !msix_disable && !pci_enable_msix(instance->pdev,
- &instance->msixentry, 1))
- instance->msi_flag = 1;
-
/*
* Initialize MFI Firmware
*/
@@ -4116,6 +4115,14 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
megasas_mgmt_info.max_index++;
/*
+ * Register with SCSI mid-layer
+ */
+ if (megasas_io_attach(instance))
+ goto fail_io_attach;
+
+ instance->unload = 0;
+
+ /*
* Initiate AEN (Asynchronous Event Notification)
*/
if (megasas_start_aen(instance)) {
@@ -4123,13 +4130,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail_start_aen;
}
- /*
- * Register with SCSI mid-layer
- */
- if (megasas_io_attach(instance))
- goto fail_io_attach;
-
- instance->unload = 0;
return 0;
fail_start_aen:
@@ -4332,10 +4332,6 @@ megasas_resume(struct pci_dev *pdev)
if (megasas_set_dma_mask(pdev))
goto fail_set_dma_mask;
- /* Now re-enable MSI-X */
- if (instance->msi_flag)
- pci_enable_msix(instance->pdev, &instance->msixentry, 1);
-
/*
* Initialize MFI Firmware
*/
@@ -4348,6 +4344,10 @@ megasas_resume(struct pci_dev *pdev)
if (megasas_transition_to_ready(instance))
goto fail_ready_state;
+ /* Now re-enable MSI-X */
+ if (instance->msi_flag)
+ pci_enable_msix(instance->pdev, &instance->msixentry, 1);
+
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
{
@@ -4384,12 +4384,6 @@ megasas_resume(struct pci_dev *pdev)
instance->instancet->enable_intr(instance->reg_set);
- /*
- * Initiate AEN (Asynchronous Event Notification)
- */
- if (megasas_start_aen(instance))
- printk(KERN_ERR "megasas: Start AEN failed\n");
-
/* Initialize the cmd completion timer */
if (poll_mode_io)
megasas_start_timer(instance, &instance->io_completion_timer,
@@ -4397,6 +4391,12 @@ megasas_resume(struct pci_dev *pdev)
MEGASAS_COMPLETION_TIMER_INTERVAL);
instance->unload = 0;
+ /*
+ * Initiate AEN (Asynchronous Event Notification)
+ */
+ if (megasas_start_aen(instance))
+ printk(KERN_ERR "megasas: Start AEN failed\n");
+
return 0;
fail_irq:
@@ -4527,6 +4527,11 @@ static void megasas_shutdown(struct pci_dev *pdev)
instance->unload = 1;
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+ instance->instancet->disable_intr(instance->reg_set);
+ free_irq(instance->msi_flag ? instance->msixentry.vector :
+ instance->pdev->irq, instance);
+ if (instance->msi_flag)
+ pci_disable_msix(instance->pdev);
}
/**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 145a8cffb1fa..f13e7abd345a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -696,22 +696,6 @@ fail_get_cmd:
}
/*
- * megasas_return_cmd_for_smid - Returns a cmd_fusion for a SMID
- * @instance: Adapter soft state
- *
- */
-void
-megasas_return_cmd_for_smid(struct megasas_instance *instance, u16 smid)
-{
- struct fusion_context *fusion;
- struct megasas_cmd_fusion *cmd;
-
- fusion = instance->ctrl_context;
- cmd = fusion->cmd_list[smid - 1];
- megasas_return_cmd_fusion(instance, cmd);
-}
-
-/*
* megasas_get_ld_map_info - Returns FW's ld_map structure
* @instance: Adapter soft state
* @pend: Pend the command or not
@@ -1153,7 +1137,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
u64 start_blk = io_info->pdBlock;
u8 *cdb = io_request->CDB.CDB32;
u32 num_blocks = io_info->numBlocks;
- u8 opcode, flagvals, groupnum, control;
+ u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
/* Check if T10 PI (DIF) is enabled for this LD */
ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
@@ -1235,7 +1219,46 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
cdb[8] = (u8)(num_blocks & 0xff);
cdb[7] = (u8)((num_blocks >> 8) & 0xff);
+ io_request->IoFlags = 10; /* Specify 10-byte cdb */
cdb_len = 10;
+ } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
+ /* Convert to 16 byte CDB for large LBA's */
+ switch (cdb_len) {
+ case 6:
+ opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
+ control = cdb[5];
+ break;
+ case 10:
+ opcode =
+ cdb[0] == READ_10 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[6];
+ control = cdb[9];
+ break;
+ case 12:
+ opcode =
+ cdb[0] == READ_12 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[10];
+ control = cdb[11];
+ break;
+ }
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+ cdb[0] = opcode;
+ cdb[1] = flagvals;
+ cdb[14] = groupnum;
+ cdb[15] = control;
+
+ /* Transfer length */
+ cdb[13] = (u8)(num_blocks & 0xff);
+ cdb[12] = (u8)((num_blocks >> 8) & 0xff);
+ cdb[11] = (u8)((num_blocks >> 16) & 0xff);
+ cdb[10] = (u8)((num_blocks >> 24) & 0xff);
+
+ io_request->IoFlags = 16; /* Specify 16-byte cdb */
+ cdb_len = 16;
}
/* Normal case, just load LBA here */
@@ -2026,17 +2049,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
struct fusion_context *fusion;
struct megasas_cmd *cmd_mfi;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
- u32 host_diag, abs_state;
+ u32 host_diag, abs_state, status_reg, reset_adapter;
instance = (struct megasas_instance *)shost->hostdata;
fusion = instance->ctrl_context;
- mutex_lock(&instance->reset_mutex);
- set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
- instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
- instance->instancet->disable_intr(instance->reg_set);
- msleep(1000);
-
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
"returning FAILED.\n");
@@ -2044,6 +2061,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
goto out;
}
+ mutex_lock(&instance->reset_mutex);
+ set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ instance->instancet->disable_intr(instance->reg_set);
+ msleep(1000);
+
/* First try waiting for commands to complete */
if (megasas_wait_for_outstanding_fusion(instance)) {
printk(KERN_WARNING "megaraid_sas: resetting fusion "
@@ -2060,7 +2083,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
}
}
- if (instance->disableOnlineCtrlReset == 1) {
+ status_reg = instance->instancet->read_fw_status_reg(
+ instance->reg_set);
+ abs_state = status_reg & MFI_STATE_MASK;
+ reset_adapter = status_reg & MFI_RESET_ADAPTER;
+ if (instance->disableOnlineCtrlReset ||
+ (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
/* Reset not supported, kill adapter */
printk(KERN_WARNING "megaraid_sas: Reset not supported"
", killing adapter.\n");
@@ -2089,6 +2117,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
/* Check that the diag write enable (DRWE) bit is on */
host_diag = readl(&instance->reg_set->fusion_host_diag);
+ retry = 0;
while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
msleep(100);
host_diag =
@@ -2126,7 +2155,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
abs_state =
instance->instancet->read_fw_status_reg(
- instance->reg_set);
+ instance->reg_set) & MFI_STATE_MASK;
retry = 0;
while ((abs_state <= MFI_STATE_FW_INIT) &&
@@ -2134,7 +2163,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
msleep(100);
abs_state =
instance->instancet->read_fw_status_reg(
- instance->reg_set);
+ instance->reg_set) & MFI_STATE_MASK;
}
if (abs_state <= MFI_STATE_FW_INIT) {
printk(KERN_WARNING "megaraid_sas: firmware "
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 2a3c05f6db8b..dcc289c25459 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,11 +69,11 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "08.100.00.01"
+#define MPT2SAS_DRIVER_VERSION "08.100.00.02"
#define MPT2SAS_MAJOR_VERSION 08
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
-#define MPT2SAS_RELEASE_VERSION 01
+#define MPT2SAS_RELEASE_VERSION 02
/*
* Set MPT2SAS_SG_DEPTH value based on user input.
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index f12e02358d6d..a7dbc6825f5f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -113,6 +113,7 @@ struct sense_info {
};
+#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC)
#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
/**
@@ -121,6 +122,7 @@ struct sense_info {
* @work: work object (ioc->fault_reset_work_q)
* @cancel_pending_work: flag set during reset handling
* @ioc: per adapter object
+ * @device_handle: device handle
* @VF_ID: virtual function id
* @VP_ID: virtual port id
* @ignore: flag meaning this event has been marked to ignore
@@ -134,6 +136,7 @@ struct fw_event_work {
u8 cancel_pending_work;
struct delayed_work delayed_work;
struct MPT2SAS_ADAPTER *ioc;
+ u16 device_handle;
u8 VF_ID;
u8 VP_ID;
u8 ignore;
@@ -3499,6 +3502,7 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
switch (prot_type) {
case SCSI_PROT_DIF_TYPE1:
+ case SCSI_PROT_DIF_TYPE2:
/*
* enable ref/guard checking
@@ -3511,13 +3515,6 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
cpu_to_be32(scsi_get_lba(scmd));
break;
- case SCSI_PROT_DIF_TYPE2:
-
- eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
- break;
-
case SCSI_PROT_DIF_TYPE3:
/*
@@ -4047,17 +4044,75 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
#endif
/**
- * _scsih_smart_predicted_fault - illuminate Fault LED
+ * _scsih_turn_on_fault_led - illuminate Fault LED
* @ioc: per adapter object
* @handle: device handle
+ * Context: process
*
* Return nothing.
*/
static void
-_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+_scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
Mpi2SepReply_t mpi_reply;
Mpi2SepRequest_t mpi_request;
+
+ memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+ mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+ mpi_request.SlotStatus =
+ cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
+ mpi_request.DevHandle = cpu_to_le16(handle);
+ mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
+ if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+ &mpi_request)) != 0) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: "
+ "ioc_status (0x%04x), loginfo(0x%08x)\n", ioc->name,
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
+ return;
+ }
+}
+
+/**
+ * _scsih_send_event_to_turn_on_fault_led - fire delayed event
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_send_event_to_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct fw_event_work *fw_event;
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT2SAS_TURN_ON_FAULT_LED;
+ fw_event->device_handle = handle;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_smart_predicted_fault - process smart errors
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
struct scsi_target *starget;
struct MPT2SAS_TARGET *sas_target_priv_data;
Mpi2EventNotificationReply_t *event_reply;
@@ -4084,30 +4139,8 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
starget_printk(KERN_WARNING, starget, "predicted fault\n");
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) {
- memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
- mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
- mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
- mpi_request.SlotStatus =
- cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
- mpi_request.DevHandle = cpu_to_le16(handle);
- mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
- if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
- &mpi_request)) != 0) {
- printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
-
- if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "enclosure_processor: ioc_status (0x%04x), "
- "loginfo(0x%08x)\n", ioc->name,
- le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo)));
- return;
- }
- }
+ if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
+ _scsih_send_event_to_turn_on_fault_led(ioc, handle);
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
@@ -6753,6 +6786,9 @@ _firmware_event_work(struct work_struct *work)
}
switch (fw_event->event) {
+ case MPT2SAS_TURN_ON_FAULT_LED:
+ _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
+ break;
case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
_scsih_sas_topology_change_event(ioc, fw_event);
break;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 58f5be4740e9..de0b1a704fb5 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -4698,12 +4698,14 @@ static int __os_scsi_tape_open(struct inode * inode, struct file * filp)
break;
if ((SRpnt->sense[2] & 0x0f) == UNIT_ATTENTION) {
+ int j;
+
STp->pos_unknown = 0;
STp->partition = STp->new_partition = 0;
if (STp->can_partitions)
STp->nbr_partitions = 1; /* This guess will be updated later if necessary */
- for (i=0; i < ST_NBR_PARTITIONS; i++) {
- STps = &(STp->ps[i]);
+ for (j = 0; j < ST_NBR_PARTITIONS; j++) {
+ STps = &(STp->ps[j]);
STps->rw = ST_IDLE;
STps->eof = ST_NOEOF;
STps->at_sm = 0;
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
index 0339ff03a535..252523d7847e 100644
--- a/drivers/scsi/qla4xxx/Makefile
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -1,5 +1,5 @@
qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
- ql4_nx.o ql4_nvram.o ql4_dbg.o
+ ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o
obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
new file mode 100644
index 000000000000..864d018631c0
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -0,0 +1,69 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2011 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+
+/* Scsi_Host attributes. */
+static ssize_t
+qla4xxx_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+ if (is_qla8022(ha))
+ return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
+ ha->firmware_version[0],
+ ha->firmware_version[1],
+ ha->patch_number, ha->build_number);
+ else
+ return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
+ ha->firmware_version[0],
+ ha->firmware_version[1],
+ ha->patch_number, ha->build_number);
+}
+
+static ssize_t
+qla4xxx_serial_num_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%s\n", ha->serial_number);
+}
+
+static ssize_t
+qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->iscsi_major,
+ ha->iscsi_minor);
+}
+
+static ssize_t
+qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
+ ha->bootload_major, ha->bootload_minor,
+ ha->bootload_patch, ha->bootload_build);
+}
+
+static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
+static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
+static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
+static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL);
+
+struct device_attribute *qla4xxx_host_attrs[] = {
+ &dev_attr_fw_version,
+ &dev_attr_serial_num,
+ &dev_attr_iscsi_version,
+ &dev_attr_optrom_version,
+ NULL,
+};
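Because these attributes are attached through the host template's shost_attrs, they surface as read-only files under the SCSI host's sysfs directory. A minimal user-space sketch reading the new fw_version attribute (assumptions: host number 0 and the standard scsi_host sysfs location):

	#include <stdio.h>

	int main(void)
	{
		char buf[64];
		FILE *f = fopen("/sys/class/scsi_host/host0/fw_version", "r");

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("qla4xxx firmware: %s", buf);
		fclose(f);
		return 0;
	}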
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 4757878d59dd..473c5c872b39 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -115,7 +115,7 @@
#define INVALID_ENTRY 0xFFFF
#define MAX_CMDS_TO_RISC 1024
#define MAX_SRBS MAX_CMDS_TO_RISC
-#define MBOX_AEN_REG_COUNT 5
+#define MBOX_AEN_REG_COUNT 8
#define MAX_INIT_RETRIES 5
/*
@@ -368,7 +368,6 @@ struct scsi_qla_host {
#define AF_INIT_DONE 1 /* 0x00000002 */
#define AF_MBOX_COMMAND 2 /* 0x00000004 */
#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
-#define AF_DPC_SCHEDULED 5 /* 0x00000020 */
#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
#define AF_LINK_UP 8 /* 0x00000100 */
@@ -584,6 +583,14 @@ struct scsi_qla_host {
uint32_t nx_reset_timeout;
struct completion mbx_intr_comp;
+
+ /* --- From About Firmware --- */
+ uint16_t iscsi_major;
+ uint16_t iscsi_minor;
+ uint16_t bootload_major;
+ uint16_t bootload_minor;
+ uint16_t bootload_patch;
+ uint16_t bootload_build;
};
static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 31e2bf97198c..01082aa77098 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -690,6 +690,29 @@ struct mbx_sys_info {
uint8_t reserved[12]; /* 34-3f */
};
+struct about_fw_info {
+ uint16_t fw_major; /* 00 - 01 */
+ uint16_t fw_minor; /* 02 - 03 */
+ uint16_t fw_patch; /* 04 - 05 */
+ uint16_t fw_build; /* 06 - 07 */
+ uint8_t fw_build_date[16]; /* 08 - 17 ASCII String */
+ uint8_t fw_build_time[16]; /* 18 - 27 ASCII String */
+ uint8_t fw_build_user[16]; /* 28 - 37 ASCII String */
+ uint16_t fw_load_source; /* 38 - 39 */
+ /* 1 = Flash Primary,
+ 2 = Flash Secondary,
+ 3 = Host Download
+ */
+ uint8_t reserved1[6]; /* 3A - 3F */
+ uint16_t iscsi_major; /* 40 - 41 */
+ uint16_t iscsi_minor; /* 42 - 43 */
+ uint16_t bootload_major; /* 44 - 45 */
+ uint16_t bootload_minor; /* 46 - 47 */
+ uint16_t bootload_patch; /* 48 - 49 */
+ uint16_t bootload_build; /* 4A - 4B */
+ uint8_t reserved2[180]; /* 4C - FF */
+};
+
struct crash_record {
uint16_t fw_major_version; /* 00 - 01 */
uint16_t fw_minor_version; /* 02 - 03 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index cc53e3fbd78c..a53a256c1f8d 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -61,7 +61,7 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
int qla4xxx_add_sess(struct ddb_entry *);
void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha);
-int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
+int qla4xxx_about_firmware(struct scsi_qla_host *ha);
void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
uint32_t intr_status);
int qla4xxx_init_rings(struct scsi_qla_host *ha);
@@ -139,4 +139,5 @@ extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
extern int ql4xenablemsix;
+extern struct device_attribute *qla4xxx_host_attrs[];
#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 48e2241ddaf4..42ed5db2d530 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1275,7 +1275,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
goto exit_init_hba;
- if (qla4xxx_get_fw_version(ha) == QLA_ERROR)
+ if (qla4xxx_about_firmware(ha) == QLA_ERROR)
goto exit_init_hba;
if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 2f40ac761cd4..0e72921c752d 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -25,9 +25,14 @@ static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
- if (sense_len == 0)
+ if (sense_len == 0) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%d: %s:"
+ " sense len 0\n", ha->host_no,
+ cmd->device->channel, cmd->device->id,
+ cmd->device->lun, __func__));
+ ha->status_srb = NULL;
return;
-
+ }
/* Save total available sense length,
* not to exceed cmd's sense buffer size */
sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
@@ -541,6 +546,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED: /* Connection mode */
case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
case MBOX_ASTS_SUBNET_STATE_CHANGE:
+ case MBOX_ASTS_DUPLICATE_IP:
/* No action */
DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
mbox_status));
@@ -593,11 +599,13 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
mbox_sts[i];
/* print debug message */
- DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
- " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
- ha->host_no, ha->aen_in, mbox_sts[0],
- mbox_sts[1], mbox_sts[2], mbox_sts[3],
- mbox_sts[4]));
+ DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
+ "mb1:0x%x mb2:0x%x mb3:0x%x "
+ "mb4:0x%x mb5:0x%x\n",
+ ha->host_no, ha->aen_in,
+ mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3],
+ mbox_sts[4], mbox_sts[5]));
/* advance pointer */
ha->aen_in++;
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index d78b58dc5011..fce8289e9752 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -86,22 +86,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
msleep(10);
}
- /* To prevent overwriting mailbox registers for a command that has
- * not yet been serviced, check to see if an active command
- * (AEN, IOCB, etc.) is interrupting, then service it.
- * -----------------------------------------------------------------
- */
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (!is_qla8022(ha)) {
- intr_status = readl(&ha->reg->ctrl_status);
- if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
- /* Service existing interrupt */
- ha->isp_ops->interrupt_service_routine(ha, intr_status);
- clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
- }
- }
-
ha->mbox_status_count = outCount;
for (i = 0; i < outCount; i++)
ha->mbox_status[i] = 0;
@@ -1057,38 +1043,65 @@ int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
}
/**
- * qla4xxx_get_fw_version - gets firmware version
+ * qla4xxx_about_firmware - gets FW, iscsi draft and boot loader version
* @ha: Pointer to host adapter structure.
*
- * Retrieves the firmware version on HBA. In QLA4010, mailboxes 2 & 3 may
- * hold an address for data. Make sure that we write 0 to those mailboxes,
- * if unused.
+ * Retrieves the FW version, iSCSI draft version & bootloader version of HBA.
+ * Mailboxes 2 & 3 may hold an address for data. Make sure that we write 0 to
+ * those mailboxes, if unused.
**/
-int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
+int qla4xxx_about_firmware(struct scsi_qla_host *ha)
{
+ struct about_fw_info *about_fw = NULL;
+ dma_addr_t about_fw_dma;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_ERROR;
+
+ about_fw = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct about_fw_info),
+ &about_fw_dma, GFP_KERNEL);
+ if (!about_fw) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
+ "for about_fw\n", __func__));
+ return status;
+ }
- /* Get firmware version. */
+ memset(about_fw, 0, sizeof(struct about_fw_info));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
-
- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
- QLA_SUCCESS) {
- DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ "
- "status %04X\n", ha->host_no, __func__, mbox_sts[0]));
- return QLA_ERROR;
+ mbox_cmd[2] = LSDW(about_fw_dma);
+ mbox_cmd[3] = MSDW(about_fw_dma);
+ mbox_cmd[4] = sizeof(struct about_fw_info);
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+ &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_ABOUT_FW "
+ "failed w/ status %04X\n", __func__,
+ mbox_sts[0]));
+ goto exit_about_fw;
}
- /* Save firmware version information. */
- ha->firmware_version[0] = mbox_sts[1];
- ha->firmware_version[1] = mbox_sts[2];
- ha->patch_number = mbox_sts[3];
- ha->build_number = mbox_sts[4];
+ /* Save version information. */
+ ha->firmware_version[0] = le16_to_cpu(about_fw->fw_major);
+ ha->firmware_version[1] = le16_to_cpu(about_fw->fw_minor);
+ ha->patch_number = le16_to_cpu(about_fw->fw_patch);
+ ha->build_number = le16_to_cpu(about_fw->fw_build);
+ ha->iscsi_major = le16_to_cpu(about_fw->iscsi_major);
+ ha->iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
+ ha->bootload_major = le16_to_cpu(about_fw->bootload_major);
+ ha->bootload_minor = le16_to_cpu(about_fw->bootload_minor);
+ ha->bootload_patch = le16_to_cpu(about_fw->bootload_patch);
+ ha->bootload_build = le16_to_cpu(about_fw->bootload_build);
+ status = QLA_SUCCESS;
- return QLA_SUCCESS;
+exit_about_fw:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct about_fw_info),
+ about_fw, about_fw_dma);
+ return status;
}
static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 03e522b2fe0b..fdfe27b38698 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -964,12 +964,26 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
/* Halt all the indiviual PEGs and other blocks of the ISP */
qla4_8xxx_rom_lock(ha);
- /* mask all niu interrupts */
+ /* disable all I2Q */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
+
+ /* disable all niu interrupts */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
/* disable xge rx/tx */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
/* disable xg1 rx/tx */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
+ /* disable sideband mac */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
+ /* disable ap0 mac */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
+ /* disable ap1 mac */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
/* halt sre */
val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
@@ -984,6 +998,7 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
/* halt pegs */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
@@ -991,9 +1006,9 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
+ msleep(5);
/* big hammer */
- msleep(1000);
if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
/* don't reset CAM block on reset */
qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index c22f2a764d9d..f2364ec59f03 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -124,6 +124,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
+ .shost_attrs = qla4xxx_host_attrs,
};
static struct iscsi_transport qla4xxx_iscsi_transport = {
@@ -412,8 +413,7 @@ void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry,
- struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+ struct scsi_cmnd *cmd)
{
struct srb *srb;
@@ -427,7 +427,6 @@ static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
srb->cmd = cmd;
srb->flags = 0;
CMD_SP(cmd) = (void *)srb;
- cmd->scsi_done = done;
return srb;
}
@@ -458,9 +457,8 @@ void qla4xxx_srb_compl(struct kref *ref)
/**
* qla4xxx_queuecommand - scsi layer issues scsi command to driver.
+ * @host: scsi host
* @cmd: Pointer to Linux's SCSI command structure
- * @done_fn: Function that the driver calls to notify the SCSI mid-layer
- * that the command has been processed.
*
* Remarks:
* This routine is invoked by Linux to send a SCSI command to the driver.
@@ -470,10 +468,9 @@ void qla4xxx_srb_compl(struct kref *ref)
* completion handling). Unfortunely, it sometimes calls the scheduler
* in interrupt context which is a big NO! NO!.
**/
-static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
- struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+ struct scsi_qla_host *ha = to_qla_host(host);
struct ddb_entry *ddb_entry = cmd->device->hostdata;
struct iscsi_cls_session *sess = ddb_entry->sess;
struct srb *srb;
@@ -515,37 +512,29 @@ static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd,
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
goto qc_host_busy;
- spin_unlock_irq(ha->host->host_lock);
-
- srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd, done);
+ srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
if (!srb)
- goto qc_host_busy_lock;
+ goto qc_host_busy;
rval = qla4xxx_send_command_to_isp(ha, srb);
if (rval != QLA_SUCCESS)
goto qc_host_busy_free_sp;
- spin_lock_irq(ha->host->host_lock);
return 0;
qc_host_busy_free_sp:
qla4xxx_srb_free_dma(ha, srb);
mempool_free(srb, ha->srb_mempool);
-qc_host_busy_lock:
- spin_lock_irq(ha->host->host_lock);
-
qc_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
qc_fail_command:
- done(cmd);
+ cmd->scsi_done(cmd);
return 0;
}
-static DEF_SCSI_QCMD(qla4xxx_queuecommand)
-
/**
* qla4xxx_mem_free - frees memory allocated to adapter
* @ha: Pointer to host adapter structure.
@@ -679,7 +668,27 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
if (ha->seconds_since_last_heartbeat == 2) {
ha->seconds_since_last_heartbeat = 0;
halt_status = qla4_8xxx_rd_32(ha,
- QLA82XX_PEG_HALT_STATUS1);
+ QLA82XX_PEG_HALT_STATUS1);
+
+ ql4_printk(KERN_INFO, ha,
+ "scsi(%ld): %s, Dumping hw/fw registers:\n "
+ " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
+ " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
+ " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
+ " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
+ ha->host_no, __func__, halt_status,
+ qla4_8xxx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS2),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
+ 0x3c),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
+ 0x3c),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
+ 0x3c),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
+ 0x3c),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
+ 0x3c));
/* Since we cannot change dev_state in interrupt
* context, set appropriate DPC flag then wakeup
@@ -715,7 +724,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
/* don't poll if reset is going on */
if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
- test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags))) {
+ test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
if (dev_state == QLA82XX_DEV_NEED_RESET &&
!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
if (!ql4xdontresethba) {
@@ -839,7 +848,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
}
/* Wakeup the dpc routine for this adapter, if needed. */
- if ((start_dpc ||
+ if (start_dpc ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
@@ -849,9 +858,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
- test_bit(DPC_AEN, &ha->dpc_flags)) &&
- !test_bit(AF_DPC_SCHEDULED, &ha->flags) &&
- ha->dpc_thread) {
+ test_bit(DPC_AEN, &ha->dpc_flags)) {
DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
" - dpc flags = 0x%lx\n",
ha->host_no, __func__, ha->dpc_flags));
@@ -1241,11 +1248,8 @@ static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
{
- if (ha->dpc_thread &&
- !test_bit(AF_DPC_SCHEDULED, &ha->flags)) {
- set_bit(AF_DPC_SCHEDULED, &ha->flags);
+ if (ha->dpc_thread)
queue_work(ha->dpc_thread, &ha->dpc_work);
- }
}
/**
@@ -1272,12 +1276,12 @@ static void qla4xxx_do_dpc(struct work_struct *work)
/* Initialization not yet finished. Don't do anything yet. */
if (!test_bit(AF_INIT_DONE, &ha->flags))
- goto do_dpc_exit;
+ return;
if (test_bit(AF_EEH_BUSY, &ha->flags)) {
DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
ha->host_no, __func__, ha->flags));
- goto do_dpc_exit;
+ return;
}
if (is_qla8022(ha)) {
@@ -1384,8 +1388,6 @@ dpc_post_reset_ha:
}
}
-do_dpc_exit:
- clear_bit(AF_DPC_SCHEDULED, &ha->flags);
}
/**
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 603155769407..610492877253 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k6"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k7"
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index abea2cf05c2e..a4b9cdbaaa0b 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -50,6 +50,8 @@
#define BUS_RESET_SETTLE_TIME (10)
#define HOST_RESET_SETTLE_TIME (10)
+static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
+
/* called with shost->host_lock held */
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
@@ -947,6 +949,48 @@ retry_tur:
}
/**
+ * scsi_eh_test_devices - check if devices are responding from error recovery.
+ * @cmd_list: scsi commands in error recovery.
+ * @work_q: queue for commands which still need more error recovery
+ * @done_q: queue for commands which are finished
+ * @try_stu: boolean indicating whether a STU command should be tried in addition to TUR.
+ *
+ * Description:
+ * Tests if devices are in a working state. Commands to devices now in
+ * a working state are sent to the done_q while commands to devices which
+ * are still failing to respond are returned to the work_q for more
+ * processing.
+ **/
+static int scsi_eh_test_devices(struct list_head *cmd_list,
+ struct list_head *work_q,
+ struct list_head *done_q, int try_stu)
+{
+ struct scsi_cmnd *scmd, *next;
+ struct scsi_device *sdev;
+ int finish_cmds;
+
+ while (!list_empty(cmd_list)) {
+ scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
+ sdev = scmd->device;
+
+ finish_cmds = !scsi_device_online(scmd->device) ||
+ (try_stu && !scsi_eh_try_stu(scmd) &&
+ !scsi_eh_tur(scmd)) ||
+ !scsi_eh_tur(scmd);
+
+ list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
+ if (scmd->device == sdev) {
+ if (finish_cmds)
+ scsi_eh_finish_cmd(scmd, done_q);
+ else
+ list_move_tail(&scmd->eh_entry, work_q);
+ }
+ }
+ return list_empty(work_q);
+}
+
+
+/**
* scsi_eh_abort_cmds - abort pending commands.
* @work_q: &list_head for pending commands.
* @done_q: &list_head for processed commands.
@@ -962,6 +1006,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *next;
+ LIST_HEAD(check_list);
int rtn;
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
@@ -973,11 +1018,10 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd);
if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
- if (!scsi_device_online(scmd->device) ||
- rtn == FAST_IO_FAIL ||
- !scsi_eh_tur(scmd)) {
+ if (rtn == FAST_IO_FAIL)
scsi_eh_finish_cmd(scmd, done_q);
- }
+ else
+ list_move_tail(&scmd->eh_entry, &check_list);
} else
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
" cmd failed:"
@@ -986,7 +1030,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
scmd));
}
- return list_empty(work_q);
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
/**
@@ -1137,6 +1181,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
struct list_head *done_q)
{
LIST_HEAD(tmp_list);
+ LIST_HEAD(check_list);
list_splice_init(work_q, &tmp_list);
@@ -1161,9 +1206,9 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
if (scmd_id(scmd) != id)
continue;
- if ((rtn == SUCCESS || rtn == FAST_IO_FAIL)
- && (!scsi_device_online(scmd->device) ||
- rtn == FAST_IO_FAIL || !scsi_eh_tur(scmd)))
+ if (rtn == SUCCESS)
+ list_move_tail(&scmd->eh_entry, &check_list);
+ else if (rtn == FAST_IO_FAIL)
scsi_eh_finish_cmd(scmd, done_q);
else
/* push back on work queue for further processing */
@@ -1171,7 +1216,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
}
}
- return list_empty(work_q);
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
/**
@@ -1185,6 +1230,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *chan_scmd, *next;
+ LIST_HEAD(check_list);
unsigned int channel;
int rtn;
@@ -1216,12 +1262,14 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
rtn = scsi_try_bus_reset(chan_scmd);
if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
- if (channel == scmd_channel(scmd))
- if (!scsi_device_online(scmd->device) ||
- rtn == FAST_IO_FAIL ||
- !scsi_eh_tur(scmd))
+ if (channel == scmd_channel(scmd)) {
+ if (rtn == FAST_IO_FAIL)
scsi_eh_finish_cmd(scmd,
done_q);
+ else
+ list_move_tail(&scmd->eh_entry,
+ &check_list);
+ }
}
} else {
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
@@ -1230,7 +1278,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
channel));
}
}
- return list_empty(work_q);
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
/**
@@ -1242,6 +1290,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *next;
+ LIST_HEAD(check_list);
int rtn;
if (!list_empty(work_q)) {
@@ -1252,12 +1301,10 @@ static int scsi_eh_host_reset(struct list_head *work_q,
, current->comm));
rtn = scsi_try_host_reset(scmd);
- if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
+ if (rtn == SUCCESS) {
+ list_splice_init(work_q, &check_list);
+ } else if (rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
- if (!scsi_device_online(scmd->device) ||
- rtn == FAST_IO_FAIL ||
- (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
- !scsi_eh_tur(scmd))
scsi_eh_finish_cmd(scmd, done_q);
}
} else {
@@ -1266,7 +1313,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
current->comm));
}
}
- return list_empty(work_q);
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
}
/**
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index b587289cfacb..2bea4f0b684a 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -59,6 +59,10 @@ scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
(unsigned long long)lba, (unsigned long long)txlen,
cdb[1] >> 5);
+
+ if (cdb[0] == WRITE_SAME)
+ trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
+
trace_seq_putc(p, 0);
return ret;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bd0806e64e85..953773cb26d9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -490,7 +490,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
unsigned int max_blocks = 0;
q->limits.discard_zeroes_data = sdkp->lbprz;
- q->limits.discard_alignment = sdkp->unmap_alignment;
+ q->limits.discard_alignment = sdkp->unmap_alignment *
+ logical_block_size;
q->limits.discard_granularity =
max(sdkp->physical_block_size,
sdkp->unmap_granularity * logical_block_size);
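The queue limit is expressed in bytes, while the device reports its UNMAP alignment in logical blocks, hence the multiplication added above. A stand-alone illustration of the conversion (the values are hypothetical):

	#include <stdio.h>

	int main(void)
	{
		unsigned int logical_block_size = 512;	/* bytes per logical block */
		unsigned int unmap_alignment = 8;	/* in blocks, as reported by the device */
		unsigned int discard_alignment_bytes =
			unmap_alignment * logical_block_size;	/* 4096 */

		printf("discard_alignment = %u bytes\n", discard_alignment_bytes);
		return 0;
	}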
@@ -2021,16 +2022,26 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
int dbd;
int modepage;
+ int first_len;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
int old_wce = sdkp->WCE;
int old_rcd = sdkp->RCD;
int old_dpofua = sdkp->DPOFUA;
- if (sdp->skip_ms_page_8)
- goto defaults;
-
- if (sdp->type == TYPE_RBC) {
+ first_len = 4;
+ if (sdp->skip_ms_page_8) {
+ if (sdp->type == TYPE_RBC)
+ goto defaults;
+ else {
+ if (sdp->skip_ms_page_3f)
+ goto defaults;
+ modepage = 0x3F;
+ if (sdp->use_192_bytes_for_3f)
+ first_len = 192;
+ dbd = 0;
+ }
+ } else if (sdp->type == TYPE_RBC) {
modepage = 6;
dbd = 8;
} else {
@@ -2039,13 +2050,15 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
}
/* cautiously ask */
- res = sd_do_mode_sense(sdp, dbd, modepage, buffer, 4, &data, &sshdr);
+ res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len,
+ &data, &sshdr);
if (!scsi_status_is_good(res))
goto bad_sense;
if (!data.header_length) {
modepage = 6;
+ first_len = 0;
sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
}
@@ -2058,30 +2071,61 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
*/
if (len < 3)
goto bad_sense;
- if (len > 20)
- len = 20;
-
- /* Take headers and block descriptors into account */
- len += data.header_length + data.block_descriptor_length;
- if (len > SD_BUF_SIZE)
- goto bad_sense;
+ else if (len > SD_BUF_SIZE) {
+ sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
+ "data from %d to %d bytes\n", len, SD_BUF_SIZE);
+ len = SD_BUF_SIZE;
+ }
+ if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
+ len = 192;
/* Get the data */
- res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
+ if (len > first_len)
+ res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len,
+ &data, &sshdr);
if (scsi_status_is_good(res)) {
int offset = data.header_length + data.block_descriptor_length;
- if (offset >= SD_BUF_SIZE - 2) {
- sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n");
- goto defaults;
+ while (offset < len) {
+ u8 page_code = buffer[offset] & 0x3F;
+ u8 spf = buffer[offset] & 0x40;
+
+ if (page_code == 8 || page_code == 6) {
+ /* We're interested only in the first 3 bytes.
+ */
+ if (len - offset <= 2) {
+ sd_printk(KERN_ERR, sdkp, "Incomplete "
+ "mode parameter data\n");
+ goto defaults;
+ } else {
+ modepage = page_code;
+ goto Page_found;
+ }
+ } else {
+ /* Go to the next page */
+ if (spf && len - offset > 3)
+ offset += 4 + (buffer[offset+2] << 8) +
+ buffer[offset+3];
+ else if (!spf && len - offset > 1)
+ offset += 2 + buffer[offset+1];
+ else {
+ sd_printk(KERN_ERR, sdkp, "Incomplete "
+ "mode parameter data\n");
+ goto defaults;
+ }
+ }
}
- if ((buffer[offset] & 0x3f) != modepage) {
+ if (modepage == 0x3F) {
+ sd_printk(KERN_ERR, sdkp, "No Caching mode page "
+ "present\n");
+ goto defaults;
+ } else if ((buffer[offset] & 0x3f) != modepage) {
sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
goto defaults;
}
-
+ Page_found:
if (modepage == 8) {
sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
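
The rewritten sd_read_cache_type() no longer assumes the caching page sits first in the MODE SENSE payload; it walks the mode-page list, using the SPF bit (0x40) to choose between the 2-byte short header and the 4-byte long header when stepping to the next page. A compact user-space sketch of that walk (the helper name and sample buffer are invented; page layout per SPC):

	#include <stdio.h>

	/* Find a mode page by code in mode parameter data starting at 'offset'.
	 * Returns the page offset, or -1 if not found or the data is short. */
	static int find_mode_page(const unsigned char *buf, int len, int offset,
				  unsigned char wanted)
	{
		while (offset < len) {
			unsigned char page_code = buf[offset] & 0x3f;
			unsigned char spf = buf[offset] & 0x40;

			if (page_code == wanted)
				return (len - offset > 2) ? offset : -1;

			if (spf && len - offset > 3)		/* long (sub)page header */
				offset += 4 + (buf[offset + 2] << 8) + buf[offset + 3];
			else if (!spf && len - offset > 1)	/* short page header */
				offset += 2 + buf[offset + 1];
			else
				return -1;			/* incomplete data */
		}
		return -1;
	}

	int main(void)
	{
		/* hypothetical payload: caching page (0x08), page length 0x12 */
		unsigned char buf[32] = { [0] = 0x08, [1] = 0x12 };

		printf("caching page at offset %d\n", find_mode_page(buf, 32, 0, 0x08));
		return 0;
	}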
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index 9f4b58b7daad..7e22b737dfd8 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -307,7 +307,7 @@ static inline int find_and_clear_bit_16(unsigned long *field)
"0: bsfw %1,%w0\n\t"
"btr %0,%1\n\t"
"jnc 0b"
- : "=&r" (rv), "=m" (*field) :);
+ : "=&r" (rv), "+m" (*field) :);
return rv;
}
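
The Ultrastor fix above changes the constraint on *field from "=m" to "+m": bsfw reads the word and btr writes it back, so the operand is read-modify-write. With a pure output constraint the compiler may treat the old contents as dead and drop or reorder the store that initialized them. A minimal stand-alone illustration of the same constraint choice (x86 only, illustrative):

	#include <stdio.h>

	/* Read-modify-write on *p: "+m" tells the compiler the memory operand
	 * is both consumed and produced by the asm statement. */
	static inline void mem_inc(unsigned int *p)
	{
		asm volatile("incl %0" : "+m" (*p));
	}

	int main(void)
	{
		unsigned int v = 41;

		mem_inc(&v);
		printf("%u\n", v);	/* prints 42 */
		return 0;
	}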
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index aed4e464d31c..dee2a2c909f5 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -31,7 +31,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/libsas.h> /* For TASK_ATTR_* */
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
@@ -95,17 +95,17 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
if (sc->device->tagged_supported) {
switch (sc->tag) {
case HEAD_OF_QUEUE_TAG:
- sam_task_attr = TASK_ATTR_HOQ;
+ sam_task_attr = MSG_HEAD_TAG;
break;
case ORDERED_QUEUE_TAG:
- sam_task_attr = TASK_ATTR_ORDERED;
+ sam_task_attr = MSG_ORDERED_TAG;
break;
default:
- sam_task_attr = TASK_ATTR_SIMPLE;
+ sam_task_attr = MSG_SIMPLE_TAG;
break;
}
} else
- sam_task_attr = TASK_ATTR_SIMPLE;
+ sam_task_attr = MSG_SIMPLE_TAG;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
@@ -379,7 +379,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
- DMA_NONE, TASK_ATTR_SIMPLE,
+ DMA_NONE, MSG_SIMPLE_TAG,
&tl_cmd->tl_sense_buf[0]);
/*
* Allocate the LUN_RESET TMR
@@ -939,18 +939,6 @@ static u16 tcm_loop_get_fabric_sense_len(void)
return 0;
}
-static u64 tcm_loop_pack_lun(unsigned int lun)
-{
- u64 result;
-
- /* LSB of lun into byte 1 big-endian */
- result = ((lun & 0xff) << 8);
- /* use flat space addressing method */
- result |= 0x40 | ((lun >> 8) & 0x3f);
-
- return cpu_to_le64(result);
-}
-
static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
switch (tl_hba->tl_proto_id) {
@@ -1481,7 +1469,6 @@ static int tcm_loop_register_configfs(void)
fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len;
fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len;
fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove;
- fabric->tf_ops.pack_lun = &tcm_loop_pack_lun;
tf_cg = &fabric->tf_group;
/*
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index a5f44a6e6e1d..ee6fad979b50 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -497,10 +497,6 @@ static int target_fabric_tf_ops_check(
printk(KERN_ERR "Missing tfo->is_state_remove()\n");
return -EINVAL;
}
- if (!(tfo->pack_lun)) {
- printk(KERN_ERR "Missing tfo->pack_lun()\n");
- return -EINVAL;
- }
/*
* We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
* tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d25e20829012..8407f9ca2b31 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -38,6 +38,7 @@
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -150,13 +151,13 @@ out:
{
struct se_device *dev = se_lun->lun_se_dev;
- spin_lock(&dev->stats_lock);
+ spin_lock_irq(&dev->stats_lock);
dev->num_cmds++;
if (se_cmd->data_direction == DMA_TO_DEVICE)
dev->write_bytes += se_cmd->data_length;
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
dev->read_bytes += se_cmd->data_length;
- spin_unlock(&dev->stats_lock);
+ spin_unlock_irq(&dev->stats_lock);
}
/*
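
Several hunks in this series switch dev->stats_lock from spin_lock() to spin_lock_irq(). The plain variant leaves interrupts enabled while the lock is held; once the same lock can also be contended from interrupt-driven paths on that CPU, that is a deadlock. A schematic kernel-style sketch of the pattern, using an invented stats structure:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	/* Illustrative only: per-device I/O statistics whose lock may also be
	 * contended from interrupt-driven completion paths. */
	struct demo_stats {
		spinlock_t	lock;
		unsigned long	num_cmds;
		unsigned long	read_bytes;
		unsigned long	write_bytes;
	};

	static void demo_account(struct demo_stats *s, bool write, u32 bytes)
	{
		/* Disable local interrupts while holding the lock so a handler
		 * taking the same lock cannot deadlock this CPU. */
		spin_lock_irq(&s->lock);
		s->num_cmds++;
		if (write)
			s->write_bytes += bytes;
		else
			s->read_bytes += bytes;
		spin_unlock_irq(&s->lock);
	}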
@@ -658,8 +659,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
struct se_session *se_sess = SE_SESS(se_cmd);
struct se_task *se_task;
unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
- u32 cdb_offset = 0, lun_count = 0, offset = 8;
- u64 i, lun;
+ u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
break;
@@ -675,15 +675,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
if (!(se_sess)) {
- lun = 0;
- buf[offset++] = ((lun >> 56) & 0xff);
- buf[offset++] = ((lun >> 48) & 0xff);
- buf[offset++] = ((lun >> 40) & 0xff);
- buf[offset++] = ((lun >> 32) & 0xff);
- buf[offset++] = ((lun >> 24) & 0xff);
- buf[offset++] = ((lun >> 16) & 0xff);
- buf[offset++] = ((lun >> 8) & 0xff);
- buf[offset++] = (lun & 0xff);
+ int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
lun_count = 1;
goto done;
}
@@ -703,15 +695,8 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
if ((cdb_offset + 8) >= se_cmd->data_length)
continue;
- lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
- buf[offset++] = ((lun >> 56) & 0xff);
- buf[offset++] = ((lun >> 48) & 0xff);
- buf[offset++] = ((lun >> 40) & 0xff);
- buf[offset++] = ((lun >> 32) & 0xff);
- buf[offset++] = ((lun >> 24) & 0xff);
- buf[offset++] = ((lun >> 16) & 0xff);
- buf[offset++] = ((lun >> 8) & 0xff);
- buf[offset++] = (lun & 0xff);
+ int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+ offset += 8;
cdb_offset += 8;
}
spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
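
The REPORT LUNS hunk above replaces the open-coded 8-byte shifting with int_to_scsilun(), the common helper declared in <scsi/scsi_device.h> (hence the new include). From memory of the 2.6.39-era implementation, it packs the LUN into the 8-byte scsi_lun in 2-byte big-endian levels; treat the following user-space re-statement as a sketch, not the authoritative code:

	#include <string.h>

	struct scsi_lun {
		unsigned char scsi_lun[8];
	};

	/* Approximate re-statement of the kernel helper, for illustration. */
	static void demo_int_to_scsilun(unsigned int lun, struct scsi_lun *scsilun)
	{
		unsigned int i;

		memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun));

		for (i = 0; i < sizeof(lun); i += 2) {
			scsilun->scsi_lun[i]     = (lun >> 8) & 0xFF;
			scsilun->scsi_lun[i + 1] = lun & 0xFF;
			lun >>= 16;
		}
	}

A REPORT LUNS entry for a mapped LUN then reduces to int_to_scsilun(lun, (struct scsi_lun *)&buf[offset]) followed by offset += 8, as the hunk shows.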
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7ff6a35f26ac..331d423fd0e0 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -41,7 +41,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
-#include <scsi/libsas.h> /* For TASK_ATTR_* */
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -911,7 +911,7 @@ static int pscsi_do_task(struct se_task *task)
* descriptor
*/
blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
- (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ),
+ (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
pscsi_req_done);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
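
In the pscsi hunk above, the at_head argument of blk_execute_rq_nowait() is driven by the SAM task attribute: HEAD OF QUEUE commands go to the front of the request queue, everything else to the tail. A schematic kernel-style fragment (the surrounding driver names are invented; the callback matches the 2.6.39-era rq_end_io_fn signature):

	#include <linux/blkdev.h>
	#include <scsi/scsi_tcq.h>

	/* Hypothetical completion callback. */
	static void demo_done(struct request *rq, int error)
	{
		/* reclaim the request and propagate 'error' upward ... */
	}

	static void demo_issue(struct request_queue *q, struct request *rq,
			       int sam_task_attr)
	{
		/* HEAD OF QUEUE tasks jump the queue; everything else is FIFO. */
		blk_execute_rq_nowait(q, NULL, rq,
				      sam_task_attr == MSG_HEAD_TAG, demo_done);
	}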
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 4a109835e420..59b8b9c5ad72 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -55,7 +55,8 @@ struct se_tmr_req *core_tmr_alloc_req(
{
struct se_tmr_req *tmr;
- tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL);
+ tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
+ GFP_ATOMIC : GFP_KERNEL);
if (!(tmr)) {
printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
return ERR_PTR(-ENOMEM);
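
core_tmr_alloc_req() above now picks its allocation flags from the calling context: GFP_KERNEL may sleep and is only safe in process context, so interrupt-context callers get GFP_ATOMIC instead. A small kernel-style sketch of the pattern (illustrative, invented helper name):

	#include <linux/hardirq.h>
	#include <linux/slab.h>

	/* Allocate from a cache without sleeping when in interrupt context. */
	static void *demo_alloc(struct kmem_cache *cache)
	{
		gfp_t gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;

		return kmem_cache_zalloc(cache, gfp);
	}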
@@ -398,9 +399,9 @@ int core_tmr_lun_reset(
printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
}
- spin_lock(&dev->stats_lock);
+ spin_lock_irq(&dev->stats_lock);
dev->num_resets++;
- spin_unlock(&dev->stats_lock);
+ spin_unlock_irq(&dev->stats_lock);
DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b9d3501bdd91..4dafeb8b5638 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -42,7 +42,7 @@
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/libsas.h> /* For TASK_ATTR_* */
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -762,7 +762,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
transport_all_task_dev_remove_state(cmd);
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
- transport_free_dev_tasks(cmd);
check_lun:
spin_lock_irqsave(&lun->lun_cmd_lock, flags);
@@ -1075,7 +1074,7 @@ static inline int transport_add_task_check_sam_attr(
* head of the struct se_device->execute_task_list, and task_prev
* after that for each subsequent task
*/
- if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
list_add(&task->t_execute_list,
(task_prev != NULL) ?
&task_prev->t_execute_list :
@@ -1195,6 +1194,7 @@ transport_get_task_from_execute_queue(struct se_device *dev)
break;
list_del(&task->t_execute_list);
+ atomic_set(&task->task_execute_queue, 0);
atomic_dec(&dev->execute_tasks);
return task;
@@ -1210,8 +1210,14 @@ void transport_remove_task_from_execute_queue(
{
unsigned long flags;
+ if (atomic_read(&task->task_execute_queue) == 0) {
+ dump_stack();
+ return;
+ }
+
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_del(&task->t_execute_list);
+ atomic_set(&task->task_execute_queue, 0);
atomic_dec(&dev->execute_tasks);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
@@ -1867,7 +1873,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
return 0;
- if (cmd->sam_task_attr == TASK_ATTR_ACA) {
+ if (cmd->sam_task_attr == MSG_ACA_TAG) {
DEBUG_STA("SAM Task Attribute ACA"
" emulation is not supported\n");
return -1;
@@ -2058,6 +2064,13 @@ int transport_generic_handle_tmr(
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
+void transport_generic_free_cmd_intr(
+ struct se_cmd *cmd)
+{
+ transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
+}
+EXPORT_SYMBOL(transport_generic_free_cmd_intr);
+
static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
struct se_task *task, *task_tmp;
@@ -2504,7 +2517,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
* to allow the passed struct se_cmd list of tasks to the front of the list.
*/
- if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ if (cmd->sam_task_attr == MSG_HEAD_TAG) {
atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
smp_mb__after_atomic_inc();
DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
@@ -2512,7 +2525,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
T_TASK(cmd)->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
- } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+ } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
list_add_tail(&cmd->se_ordered_list,
&SE_DEV(cmd)->ordered_cmd_list);
@@ -3411,7 +3424,7 @@ static int transport_generic_cmd_sequencer(
* See spc4r17 section 5.3
*/
if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = TASK_ATTR_HOQ;
+ cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
break;
case READ_BUFFER:
@@ -3619,7 +3632,7 @@ static int transport_generic_cmd_sequencer(
* See spc4r17 section 5.3
*/
if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = TASK_ATTR_HOQ;
+ cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
break;
default:
@@ -3777,21 +3790,21 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
struct se_cmd *cmd_p, *cmd_tmp;
int new_active_tasks = 0;
- if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) {
+ if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
atomic_dec(&dev->simple_cmds);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
- } else if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
atomic_dec(&dev->dev_hoq_count);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
- } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+ } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
spin_lock(&dev->ordered_cmd_lock);
list_del(&cmd->se_ordered_list);
atomic_dec(&dev->dev_ordered_sync);
@@ -3824,7 +3837,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
new_active_tasks++;
spin_lock(&dev->delayed_cmd_lock);
- if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED)
+ if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
break;
}
spin_unlock(&dev->delayed_cmd_lock);
@@ -4776,18 +4789,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
sg_end_cur->page_link &= ~0x02;
sg_chain(sg_head, task_sg_num, sg_head_cur);
- sg_count += (task->task_sg_num + 1);
- } else
sg_count += task->task_sg_num;
+ task_sg_num = (task->task_sg_num + 1);
+ } else {
+ sg_chain(sg_head, task_sg_num, sg_head_cur);
+ sg_count += task->task_sg_num;
+ task_sg_num = task->task_sg_num;
+ }
sg_head = sg_head_cur;
sg_link = sg_link_cur;
- task_sg_num = task->task_sg_num;
continue;
}
sg_head = sg_first = &task->task_sg[0];
sg_link = &task->task_sg[task->task_sg_num];
- task_sg_num = task->task_sg_num;
/*
* Check for single task..
*/
@@ -4798,9 +4813,12 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
*/
sg_end = &task->task_sg[task->task_sg_num - 1];
sg_end->page_link &= ~0x02;
- sg_count += (task->task_sg_num + 1);
- } else
sg_count += task->task_sg_num;
+ task_sg_num = (task->task_sg_num + 1);
+ } else {
+ sg_count += task->task_sg_num;
+ task_sg_num = task->task_sg_num;
+ }
}
/*
* Setup the starting pointer and total t_tasks_sg_linked_no including
@@ -4809,21 +4827,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
T_TASK(cmd)->t_tasks_sg_chained = sg_first;
T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
- DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and"
- " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained,
+ DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and"
+ " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained,
T_TASK(cmd)->t_tasks_sg_chained_no);
for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
T_TASK(cmd)->t_tasks_sg_chained_no, i) {
- DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n",
- sg, sg_page(sg), sg->length, sg->offset);
+ DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
+ i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
if (sg_is_chain(sg))
DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
if (sg_is_last(sg))
DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
}
-
}
EXPORT_SYMBOL(transport_do_task_sg_chain);
@@ -5297,6 +5314,8 @@ void transport_generic_free_cmd(
if (wait_for_tasks && cmd->transport_wait_for_tasks)
cmd->transport_wait_for_tasks(cmd, 0, 0);
+ transport_free_dev_tasks(cmd);
+
transport_generic_remove(cmd, release_to_pool,
session_reinstatement);
}
@@ -6132,6 +6151,9 @@ get_cmd:
case TRANSPORT_REMOVE:
transport_generic_remove(cmd, 1, 0);
break;
+ case TRANSPORT_FREE_CMD_INTR:
+ transport_generic_free_cmd(cmd, 0, 1, 0);
+ break;
case TRANSPORT_PROCESS_TMR:
transport_generic_do_tmr(cmd);
break;
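
The new transport_generic_free_cmd_intr() above exists so interrupt-context callers never free a command directly: they queue it with TRANSPORT_FREE_CMD_INTR, and the processing thread later calls transport_generic_free_cmd() on their behalf, as the new switch case shows. A hypothetical fabric completion path might use it as follows (sketch only; the surrounding driver code is invented):

	#include <linux/hardirq.h>
	#include <target/target_core_base.h>
	#include <target/target_core_transport.h>

	/* Hypothetical completion hook that may run in hard-irq context. */
	static void demo_fabric_complete(struct se_cmd *se_cmd)
	{
		/*
		 * Freeing here would require non-sleeping paths throughout;
		 * from interrupt context, hand the command to the processing
		 * thread instead.
		 */
		if (in_interrupt())
			transport_generic_free_cmd_intr(se_cmd);
		else
			transport_generic_free_cmd(se_cmd, 0, 1, 0);
	}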
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 49e51778f733..c056a1132ae1 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -35,6 +35,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
@@ -592,8 +593,25 @@ static void ft_send_cmd(struct ft_cmd *cmd)
case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
goto err; /* TBD not supported by tcm_fc yet */
}
+ /*
+ * Locate the SAM Task Attr from fc_pri_ta
+ */
+ switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
+ case FCP_PTA_HEADQ:
+ task_attr = MSG_HEAD_TAG;
+ break;
+ case FCP_PTA_ORDERED:
+ task_attr = MSG_ORDERED_TAG;
+ break;
+ case FCP_PTA_ACA:
+ task_attr = MSG_ACA_TAG;
+ break;
+ case FCP_PTA_SIMPLE: /* Fallthrough */
+ default:
+ task_attr = MSG_SIMPLE_TAG;
+ }
+
- /* FCP_PTA_ maps 1:1 to TASK_ATTR_ */
-	task_attr = fcp->fc_pri_ta & FCP_PTA_MASK;
data_len = ntohl(fcp->fc_dl);
cmd->cdb = fcp->fc_cdb;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index fcdbbffe88cc..84e868c255dd 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -519,13 +519,6 @@ static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
return tpg->index;
}
-static u64 ft_pack_lun(unsigned int index)
-{
- WARN_ON(index >= 256);
- /* Caller wants this byte-swapped */
- return cpu_to_le64((index & 0xff) << 8);
-}
-
static struct target_core_fabric_ops ft_fabric_ops = {
.get_fabric_name = ft_get_fabric_name,
.get_fabric_proto_ident = fc_get_fabric_proto_ident,
@@ -564,7 +557,6 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.get_fabric_sense_len = ft_get_fabric_sense_len,
.set_fabric_sense_len = ft_set_fabric_sense_len,
.is_state_remove = ft_is_state_remove,
- .pack_lun = ft_pack_lun,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 8f6bb9c7f3eb..ee866060f8a4 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -604,6 +604,7 @@ struct sas_domain_function_template {
int (*lldd_clear_aca)(struct domain_device *, u8 *lun);
int (*lldd_clear_task_set)(struct domain_device *, u8 *lun);
int (*lldd_I_T_nexus_reset)(struct domain_device *);
+ int (*lldd_ata_soft_reset)(struct domain_device *);
int (*lldd_lu_reset)(struct domain_device *, u8 *lun);
int (*lldd_query_task)(struct sas_task *);
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index d6e7994aa634..81dd12edc38c 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -9,6 +9,7 @@
#define MSG_SIMPLE_TAG 0x20
#define MSG_HEAD_TAG 0x21
#define MSG_ORDERED_TAG 0x22
+#define MSG_ACA_TAG 0x24 /* unsupported */
#define SCSI_NO_TAG (-1) /* identify no tag in use */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 1d3b5b2f0dbc..561ac99def5a 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -98,6 +98,7 @@ enum transport_state_table {
TRANSPORT_REMOVE = 14,
TRANSPORT_FREE = 15,
TRANSPORT_NEW_CMD_MAP = 16,
+ TRANSPORT_FREE_CMD_INTR = 17,
};
/* Used for struct se_cmd->se_cmd_flags */
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h
index dc78f77f9450..747e1404dca0 100644
--- a/include/target/target_core_fabric_ops.h
+++ b/include/target/target_core_fabric_ops.h
@@ -77,7 +77,6 @@ struct target_core_fabric_ops {
u16 (*set_fabric_sense_len)(struct se_cmd *, u32);
u16 (*get_fabric_sense_len)(void);
int (*is_state_remove)(struct se_cmd *);
- u64 (*pack_lun)(unsigned int);
/*
* fabric module calls for target_core_fabric_configfs.c
*/
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 59aa464f6ee2..24a1c6cb83c3 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -172,6 +172,7 @@ extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
+extern void transport_generic_free_cmd_intr(struct se_cmd *);
extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,