From cbaa42213461e9a722a391b3800d7c111de7049b Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 3 Sep 2014 00:00:39 -0500 Subject: libiscsi: fix potential buffer overrun in __iscsi_conn_send_pdu This patches fixes a potential buffer overrun in __iscsi_conn_send_pdu. This function is used by iscsi drivers and userspace to send iscsi PDUs/ commands. For login commands, we have a set buffer size. For all other commands we do not support data buffers. This was reported by Dan Carpenter here: http://www.spinics.net/lists/linux-scsi/msg66838.html Reported-by: Dan Carpenter Signed-off-by: Mike Christie Reviewed-by: Sagi Grimberg Signed-off-by: Christoph Hellwig --- drivers/scsi/libiscsi.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index ea025e4806b6..191b59793519 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -717,11 +717,21 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, return NULL; } + if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) { + iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN); + return NULL; + } + task = conn->login_task; } else { if (session->state != ISCSI_STATE_LOGGED_IN) return NULL; + if (data_size != 0) { + iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode); + return NULL; + } + BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); -- cgit From 4b8c6ba616deab7ddf2725f5833cf027a149987f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 12 Sep 2014 16:00:32 -0700 Subject: scsi: fix regression that accidentally disabled block-based tcq Please try the fix below, looks like the commit broke TCQ for all drivers using block-level tagging. Signed-off-by: Christoph Hellwig --- include/scsi/scsi_tcq.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h index cdcc90b07ecb..e64583560701 100644 --- a/include/scsi/scsi_tcq.h +++ b/include/scsi/scsi_tcq.h @@ -68,7 +68,7 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth) return; if (!shost_use_blk_mq(sdev->host) && - blk_queue_tagged(sdev->request_queue)) + !blk_queue_tagged(sdev->request_queue)) blk_queue_init_tags(sdev->request_queue, depth, sdev->host->bqt); -- cgit From 74cf298fedfcb732335cae5d157e95295e87cf2a Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 16 Aug 2014 14:15:11 -0700 Subject: scsi: fix various kernel-doc problems in scsi_error.c Convert spaces to tabs in kernel-doc notation. Correct duplicated (copy-paste) kernel-doc comments that are incorrect. Fix kernel-doc warning: Warning(..//drivers/scsi/scsi_error.c:1647): No description found for parameter 'shost' Signed-off-by: Randy Dunlap Reviewed-by: Ewan D. Milne Signed-off-by: Christoph Hellwig --- drivers/scsi/scsi_error.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 5db8454474ee..6b20ef3fee54 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -1238,9 +1238,9 @@ retry_tur: /** * scsi_eh_test_devices - check if devices are responding from error recovery. * @cmd_list: scsi commands in error recovery. 
- * @work_q: queue for commands which still need more error recovery - * @done_q: queue for commands which are finished - * @try_stu: boolean on if a STU command should be tried in addition to TUR. + * @work_q: queue for commands which still need more error recovery + * @done_q: queue for commands which are finished + * @try_stu: boolean on if a STU command should be tried in addition to TUR. * * Decription: * Tests if devices are in a working state. Commands to devices now in @@ -1373,7 +1373,7 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd) /** * scsi_eh_stu - send START_UNIT if needed * @shost: &scsi host being recovered. - * @work_q: &list_head for pending commands. + * @work_q: &list_head for pending commands. * @done_q: &list_head for processed commands. * * Notes: @@ -1436,7 +1436,7 @@ static int scsi_eh_stu(struct Scsi_Host *shost, /** * scsi_eh_bus_device_reset - send bdr if needed * @shost: scsi host being recovered. - * @work_q: &list_head for pending commands. + * @work_q: &list_head for pending commands. * @done_q: &list_head for processed commands. * * Notes: @@ -1502,7 +1502,7 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost, /** * scsi_eh_target_reset - send target reset if needed * @shost: scsi host being recovered. - * @work_q: &list_head for pending commands. + * @work_q: &list_head for pending commands. * @done_q: &list_head for processed commands. * * Notes: @@ -1567,7 +1567,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost, /** * scsi_eh_bus_reset - send a bus reset * @shost: &scsi host being recovered. - * @work_q: &list_head for pending commands. + * @work_q: &list_head for pending commands. * @done_q: &list_head for processed commands. */ static int scsi_eh_bus_reset(struct Scsi_Host *shost, @@ -1638,8 +1638,9 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost, /** * scsi_eh_host_reset - send a host reset - * @work_q: list_head for processed commands. - * @done_q: list_head for processed commands. + * @shost: host to be reset. + * @work_q: &list_head for pending commands. + * @done_q: &list_head for processed commands. */ static int scsi_eh_host_reset(struct Scsi_Host *shost, struct list_head *work_q, @@ -1677,8 +1678,8 @@ static int scsi_eh_host_reset(struct Scsi_Host *shost, /** * scsi_eh_offline_sdevs - offline scsi devices that fail to recover - * @work_q: list_head for processed commands. - * @done_q: list_head for processed commands. + * @work_q: &list_head for pending commands. + * @done_q: &list_head for processed commands. */ static void scsi_eh_offline_sdevs(struct list_head *work_q, struct list_head *done_q) @@ -2043,8 +2044,8 @@ static void scsi_restart_operations(struct Scsi_Host *shost) /** * scsi_eh_ready_devs - check device ready state and recover if not. - * @shost: host to be recovered. - * @work_q: &list_head for pending commands. + * @shost: host to be recovered. + * @work_q: &list_head for pending commands. * @done_q: &list_head for processed commands. */ void scsi_eh_ready_devs(struct Scsi_Host *shost, -- cgit From 2eefd57b97609949ae40952da2dea338e7d9a125 Mon Sep 17 00:00:00 2001 From: Sujit Reddy Thumma Date: Mon, 11 Aug 2014 15:40:37 +0300 Subject: sd: Avoid sending medium write commands if device is write protected The SYNCHRONIZE_CACHE command is a medium write command and hence can fail when the device is write protected. Avoid sending such commands by making sure that write-cache-enable is disabled even though the device claim to support it. 
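For illustration, a minimal self-contained sketch of the rule this change enforces (the structure and names below are hypothetical stand-ins, not sd.c's own): a write-protected device must be treated as having its write cache off, so that SYNCHRONIZE CACHE, which is a medium write, is never issued to it.

struct disk_cache_state {
	unsigned int wce:1;		/* device reports write cache enabled */
	unsigned int write_prot:1;	/* medium is write protected */
};

/* Returns nonzero when issuing a SYNCHRONIZE CACHE flush makes sense. */
static int disk_should_flush(const struct disk_cache_state *d)
{
	/*
	 * A flush is a medium write and will fail on a write-protected
	 * device, so skip it even if the device claims WCE.  The patch
	 * establishes this invariant up front by clearing WCE at probe
	 * time and when cache_type is changed through sysfs.
	 */
	return d->wce && !d->write_prot;
}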
Signed-off-by: Sujit Reddy Thumma Signed-off-by: Dolev Raviv Reviewed-by: Martin K. Petersen Reviewed-by: Venkatesh Srinivas Signed-off-by: Christoph Hellwig --- drivers/scsi/sd.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 2c2041ca4b70..aa43496b7b93 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -185,7 +185,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr, if (ct < 0) return -EINVAL; rcd = ct & 0x01 ? 1 : 0; - wce = ct & 0x02 ? 1 : 0; + wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0; if (sdkp->cache_override) { sdkp->WCE = wce; @@ -2490,6 +2490,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) sdkp->DPOFUA = 0; } + /* No cache flush allowed for write protected devices */ + if (sdkp->WCE && sdkp->write_prot) + sdkp->WCE = 0; + if (sdkp->first_scan || old_wce != sdkp->WCE || old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA) sd_printk(KERN_NOTICE, sdkp, -- cgit From 64bdcbc449105377dd60c8da97cfc1663b39562c Mon Sep 17 00:00:00 2001 From: "Kashyap.Desai@avagotech.com" Date: Wed, 20 Aug 2014 19:24:33 +0530 Subject: scsi: add use_cmd_list flag Add a use_cmd_list flag in struct Scsi_Host to request keeping track of all outstanding commands per device. Default behaviour is not to keep track of cmd_list per sdev, as this may introduce lock contention. (overhead is more on multi-node NUMA.), and only enable it on the two drivers that need it. Signed-off-by: Kashyap Desai Reviewed-by: Martin K. Petersen Reviewed-by: Bart Van Assche Signed-off-by: Christoph Hellwig --- drivers/scsi/aacraid/linit.c | 1 + drivers/scsi/dpt_i2o.c | 1 + drivers/scsi/scsi_lib.c | 24 ++++++++++++------------ include/scsi/scsi_host.h | 1 + 4 files changed, 15 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 63f576c9300a..a759cb2d4b15 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1152,6 +1152,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) shost->irq = pdev->irq; shost->unique_id = unique_id; shost->max_cmd_len = 16; + shost->use_cmd_list = 1; aac = (struct aac_dev *)shost->hostdata; aac->base_start = pci_resource_start(pdev, 0); diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 67283ef418ac..072f0ec2851e 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -2363,6 +2363,7 @@ static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht) host->unique_id = (u32)sys_tbl_pa + pHba->unit; host->sg_tablesize = pHba->sg_tablesize; host->can_queue = pHba->post_fifo_size; + host->use_cmd_list = 1; return 0; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index d837dc180522..b9a8ddd77eef 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -645,16 +645,18 @@ static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd) static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; + struct Scsi_Host *shost = sdev->host; unsigned long flags; - BUG_ON(list_empty(&cmd->list)); - scsi_mq_free_sgtables(cmd); scsi_uninit_cmd(cmd); - spin_lock_irqsave(&sdev->list_lock, flags); - list_del_init(&cmd->list); - spin_unlock_irqrestore(&sdev->list_lock, flags); + if (shost->use_cmd_list) { + BUG_ON(list_empty(&cmd->list)); + spin_lock_irqsave(&sdev->list_lock, flags); + list_del_init(&cmd->list); + spin_unlock_irqrestore(&sdev->list_lock, flags); + } } /* @@ -1815,13 +1817,11 @@ 
static int scsi_mq_prep_fn(struct request *req) INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); cmd->jiffies_at_alloc = jiffies; - /* - * XXX: cmd_list lookups are only used by two drivers, try to get - * rid of this list in common code. - */ - spin_lock_irq(&sdev->list_lock); - list_add_tail(&cmd->list, &sdev->cmd_list); - spin_unlock_irq(&sdev->list_lock); + if (shost->use_cmd_list) { + spin_lock_irq(&sdev->list_lock); + list_add_tail(&cmd->list, &sdev->cmd_list); + spin_unlock_irq(&sdev->list_lock); + } sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; cmd->sdb.table.sgl = sg; diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index ba2034779961..cafb260ef2d3 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -680,6 +680,7 @@ struct Scsi_Host { unsigned no_write_same:1; unsigned use_blk_mq:1; + unsigned use_cmd_list:1; /* * Optional work queue to be utilized by the transport -- cgit From 50c4e96411a6cd728f04cf70d8d6def57828b320 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Tue, 2 Sep 2014 11:35:50 -0400 Subject: scsi: don't store LUN bits in CDB[1] for USB mass-storage devices The SCSI specification requires that the second Command Data Byte should contain the LUN value in its high-order bits if the recipient device reports SCSI level 2 or below. Nevertheless, some USB mass-storage devices use those bits for other purposes in vendor-specific commands. Currently Linux has no way to send such commands, because the SCSI stack always overwrites the LUN bits. Testing shows that Windows 7 and XP do not store the LUN bits in the CDB when sending commands to a USB device. This doesn't matter if the device uses the Bulk-Only or UAS transports (which virtually all modern USB mass-storage devices do), as these have a separate mechanism for sending the LUN value. Therefore this patch introduces a flag in the Scsi_Host structure to inform the SCSI midlayer that a transport does not require the LUN bits to be stored in the CDB, and it makes usb-storage set this flag for all devices using the Bulk-Only transport. (UAS is handled by a separate driver, but it doesn't really matter because no SCSI-2 or lower device is at all likely to use UAS.) The patch also cleans up the code responsible for storing the LUN value by adding a bitflag to the scsi_device structure. The test for whether to stick the LUN value in the CDB can be made when the device is probed, and stored for future use rather than being made over and over in the fast path. Signed-off-by: Alan Stern Reported-by: Tiziano Bacocco Acked-by: Martin K. Petersen Acked-by: James Bottomley Signed-off-by: Christoph Hellwig --- drivers/scsi/scsi.c | 8 ++------ drivers/scsi/scsi_scan.c | 10 ++++++++++ drivers/scsi/scsi_sysfs.c | 12 ++++++++++++ drivers/usb/storage/usb.c | 8 ++++++++ include/scsi/scsi_device.h | 1 + include/scsi/scsi_host.h | 3 +++ 6 files changed, 36 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index d81f3cc43ff1..1423cb17fbfd 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -670,14 +670,10 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) return SCSI_MLQUEUE_DEVICE_BUSY; } - /* - * If SCSI-2 or lower, store the LUN value in cmnd. - */ - if (cmd->device->scsi_level <= SCSI_2 && - cmd->device->scsi_level != SCSI_UNKNOWN) { + /* Store the LUN value in cmnd, if needed. 
*/ + if (cmd->device->lun_in_cdb) cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) | (cmd->device->lun << 5 & 0xe0); - } scsi_log_send(cmd); diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 56675dbbf681..f84c40188478 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -736,6 +736,16 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, sdev->scsi_level++; sdev->sdev_target->scsi_level = sdev->scsi_level; + /* + * If SCSI-2 or lower, and if the transport requires it, + * store the LUN value in CDB[1]. + */ + sdev->lun_in_cdb = 0; + if (sdev->scsi_level <= SCSI_2 && + sdev->scsi_level != SCSI_UNKNOWN && + !sdev->host->no_scsi2_lun_in_cdb) + sdev->lun_in_cdb = 1; + return 0; } diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 8b4105a22ac2..85e36f3a5585 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1263,7 +1263,19 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev) sdev->sdev_dev.class = &sdev_class; dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu", sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); + /* + * Get a default scsi_level from the target (derived from sibling + * devices). This is the best we can do for guessing how to set + * sdev->lun_in_cdb for the initial INQUIRY command. For LUN 0 the + * setting doesn't matter, because all the bits are zero anyway. + * But it does matter for higher LUNs. + */ sdev->scsi_level = starget->scsi_level; + if (sdev->scsi_level <= SCSI_2 && + sdev->scsi_level != SCSI_UNKNOWN && + !shost->no_scsi2_lun_in_cdb) + sdev->lun_in_cdb = 1; + transport_setup_device(&sdev->sdev_gendev); spin_lock_irqsave(shost->host_lock, flags); list_add_tail(&sdev->same_target_siblings, &starget->devices); diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index cedb29252a92..bf3f8e2de046 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -983,6 +983,14 @@ int usb_stor_probe2(struct us_data *us) if (!(us->fflags & US_FL_SCM_MULT_TARG)) us_to_host(us)->max_id = 1; + /* + * Like Windows, we won't store the LUN bits in CDB[1] for SCSI-2 + * devices using the Bulk-Only transport (even though this violates + * the SCSI spec). 
+ */ + if (us->transport == usb_stor_Bulk_transport) + us_to_host(us)->no_scsi2_lun_in_cdb = 1; + /* Find the endpoints and calculate pipe values */ result = get_pipes(us); if (result) diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 1a0d1842962e..27ecee73bd72 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -174,6 +174,7 @@ struct scsi_device { unsigned wce_default_on:1; /* Cache is ON by default */ unsigned no_dif:1; /* T10 PI (DIF) should be disabled */ unsigned broken_fua:1; /* Don't set FUA bit */ + unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */ atomic_t disk_events_disable_depth; /* disable depth for disk events */ diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index cafb260ef2d3..d0f69a3210df 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -693,6 +693,9 @@ struct Scsi_Host { */ struct workqueue_struct *tmf_work_q; + /* The transport requires the LUN bits NOT to be stored in CDB[1] */ + unsigned no_scsi2_lun_in_cdb:1; + /* * Value host_blocked counts down from */ -- cgit From 6fe8c1dbefd63ef3988edb745d9eb81fc6d0513c Mon Sep 17 00:00:00 2001 From: Subhash Jadavani Date: Wed, 10 Sep 2014 14:54:09 +0300 Subject: scsi: balance out autopm get/put calls in scsi_sysfs_add_sdev() SCSI Well-known logical units generally don't have any scsi driver associated with it which means no one will call scsi_autopm_put_device() on these wlun scsi devices and this would result in keeping the corresponding scsi device always active (hence LLD can't be suspended as well). Same exact problem can be seen for other scsi device representing normal logical unit whose driver is yet to be loaded. This patch fixes the above problem with this approach: - make the scsi_autopm_put_device call at the end of scsi_sysfs_add_sdev to make it balance out the get earlier in the function. - let drivers do paired get/put calls in their probe methods. Signed-off-by: Subhash Jadavani Signed-off-by: Dolev Raviv Signed-off-by: Christoph Hellwig --- drivers/scsi/scsi_sysfs.c | 5 +---- drivers/scsi/sd.c | 2 ++ drivers/scsi/sr.c | 2 ++ drivers/scsi/st.c | 2 ++ 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 85e36f3a5585..f4cb7b3e9e23 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1044,10 +1044,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) pm_runtime_enable(&sdev->sdev_gendev); scsi_autopm_put_target(starget); - /* The following call will keep sdev active indefinitely, until - * its driver does a corresponding scsi_autopm_pm_device(). Only - * drivers supporting autosuspend will do this. 
- */ scsi_autopm_get_device(sdev); error = device_add(&sdev->sdev_gendev); @@ -1085,6 +1081,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) } } + scsi_autopm_put_device(sdev); return error; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index aa43496b7b93..0cb5c9f0c743 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2965,6 +2965,7 @@ static int sd_probe(struct device *dev) int index; int error; + scsi_autopm_get_device(sdp); error = -ENODEV; if (sdp->type != TYPE_DISK && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC) goto out; @@ -3041,6 +3042,7 @@ static int sd_probe(struct device *dev) out_free: kfree(sdkp); out: + scsi_autopm_put_device(sdp); return error; } diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 7eeb93627beb..2de44cc58b1a 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -657,6 +657,7 @@ static int sr_probe(struct device *dev) struct scsi_cd *cd; int minor, error; + scsi_autopm_get_device(sdev); error = -ENODEV; if (sdev->type != TYPE_ROM && sdev->type != TYPE_WORM) goto fail; @@ -744,6 +745,7 @@ fail_put: fail_free: kfree(cd); fail: + scsi_autopm_put_device(sdev); return error; } diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index aff9689de0f7..d3fd6e8fb378 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -4105,6 +4105,7 @@ static int st_probe(struct device *dev) return -ENODEV; } + scsi_autopm_get_device(SDp); i = queue_max_segments(SDp->request_queue); if (st_max_sg_segs < i) i = st_max_sg_segs; @@ -4244,6 +4245,7 @@ out_put_disk: out_buffer_free: kfree(buffer); out: + scsi_autopm_put_device(SDp); return -ENODEV; }; -- cgit From ed81d7741eddb690aa9d5c79945123db11ec7a0c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 14 Jul 2014 09:34:54 +0200 Subject: eata: remove driver_lock port_detect is only called from the module_init routine and thus implicitly serialized, so remove the driver lock which was held over potentially sleeping function calls. Signed-off-by: Christoph Hellwig Reported-by: Arthur Marsh Tested-by: Arthur Marsh Reviewed-by: Martin K. 
Petersen Reviewed-by: Hannes Reinecke --- drivers/scsi/eata.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c index 813dd5c998e4..943ad3a19661 100644 --- a/drivers/scsi/eata.c +++ b/drivers/scsi/eata.c @@ -837,7 +837,6 @@ struct hostdata { static struct Scsi_Host *sh[MAX_BOARDS]; static const char *driver_name = "EATA"; static char sha[MAX_BOARDS]; -static DEFINE_SPINLOCK(driver_lock); /* Initialize num_boards so that ihdlr can work while detect is in progress */ static unsigned int num_boards = MAX_BOARDS; @@ -1097,8 +1096,6 @@ static int port_detect(unsigned long port_base, unsigned int j, goto fail; } - spin_lock_irq(&driver_lock); - if (do_dma(port_base, 0, READ_CONFIG_PIO)) { #if defined(DEBUG_DETECT) printk("%s: detect, do_dma failed at 0x%03lx.\n", name, @@ -1264,10 +1261,7 @@ static int port_detect(unsigned long port_base, unsigned int j, } #endif - spin_unlock_irq(&driver_lock); sh[j] = shost = scsi_register(tpnt, sizeof(struct hostdata)); - spin_lock_irq(&driver_lock); - if (shost == NULL) { printk("%s: unable to register host, detaching.\n", name); goto freedma; @@ -1344,8 +1338,6 @@ static int port_detect(unsigned long port_base, unsigned int j, else sprintf(dma_name, "DMA %u", dma_channel); - spin_unlock_irq(&driver_lock); - for (i = 0; i < shost->can_queue; i++) ha->cp[i].cp_dma_addr = pci_map_single(ha->pdev, &ha->cp[i], @@ -1438,7 +1430,6 @@ static int port_detect(unsigned long port_base, unsigned int j, freeirq: free_irq(irq, &sha[j]); freelock: - spin_unlock_irq(&driver_lock); release_region(port_base, REGION_SIZE); fail: return 0; -- cgit From 49bd1a8f96d7b169edb11eb41e084b1b1669557a Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Mon, 18 Aug 2014 08:01:41 +0200 Subject: hpsa: Fallback to MSI rather than to INTx if MSI-X failed Currently the driver falls back to INTx mode when MSI-X initialization failed. This is a suboptimal behaviour for chips that also support MSI. This update changes that behaviour and falls back to MSI mode in case MSI-X mode initialization failed. Signed-off-by: Alexander Gordeev Acked-by: "Stephen M. Cameron" Cc: iss_storagedev@hp.com Cc: linux-scsi@vger.kernel.org Cc: linux-pci@vger.kernel.org Signed-off-by: Christoph Hellwig --- drivers/scsi/hpsa.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 6b35d0dfe64c..a9c4c9f05bb4 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -6176,7 +6176,6 @@ static void hpsa_interrupt_mode(struct ctlr_info *h) dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); h->msix_vector = 0; - goto default_int_mode; } } if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { -- cgit From 18fce3c440c762ab4dfb6156bbd3c0beb0f67f17 Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Mon, 18 Aug 2014 08:01:42 +0200 Subject: hpsa: Use pci_enable_msix_range() instead of pci_enable_msix() As result of deprecation of MSI-X/MSI enablement functions pci_enable_msix() and pci_enable_msi_block() all drivers using these two interfaces need to be updated to use the new pci_enable_msi_range() or pci_enable_msi_exact() and pci_enable_msix_range() or pci_enable_msix_exact() interfaces. Signed-off-by: Alexander Gordeev Acked-by: "Stephen M. 
Cameron" Cc: iss_storagedev@hp.com Cc: linux-scsi@vger.kernel.org Cc: linux-pci@vger.kernel.org Signed-off-by: Christoph Hellwig --- drivers/scsi/hpsa.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index a9c4c9f05bb4..8c17c432dcd5 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -6159,25 +6159,22 @@ static void hpsa_interrupt_mode(struct ctlr_info *h) h->msix_vector = MAX_REPLY_QUEUES; if (h->msix_vector > num_online_cpus()) h->msix_vector = num_online_cpus(); - err = pci_enable_msix(h->pdev, hpsa_msix_entries, - h->msix_vector); - if (err > 0) { + err = pci_enable_msix_range(h->pdev, hpsa_msix_entries, + 1, h->msix_vector); + if (err < 0) { + dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); + h->msix_vector = 0; + goto single_msi_mode; + } else if (err < h->msix_vector) { dev_warn(&h->pdev->dev, "only %d MSI-X vectors " "available\n", err); - h->msix_vector = err; - err = pci_enable_msix(h->pdev, hpsa_msix_entries, - h->msix_vector); - } - if (!err) { - for (i = 0; i < h->msix_vector; i++) - h->intr[i] = hpsa_msix_entries[i].vector; - return; - } else { - dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", - err); - h->msix_vector = 0; } + h->msix_vector = err; + for (i = 0; i < h->msix_vector; i++) + h->intr[i] = hpsa_msix_entries[i].vector; + return; } +single_msi_mode: if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { dev_info(&h->pdev->dev, "MSI\n"); if (!pci_enable_msi(h->pdev)) -- cgit From dd0881281d98e7d3d5e34224058a5d1389241126 Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Mon, 18 Aug 2014 08:01:43 +0200 Subject: megaraid: Fail resume if MSI-X re-initialization failed Currently the driver fails to analize MSI-X re-enablement status on resuming and always assumes the success. This update checks the MSI-X initialization result and fails to resume if MSI-Xs re-enablement failed. Signed-off-by: Alexander Gordeev Acked-by: Kashyap Desai Cc: Neela Syam Kolli Cc: linux-scsi@vger.kernel.org Cc: linux-pci@vger.kernel.org Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas_base.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 22a04e37b70a..e4ffae5346fc 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5132,9 +5132,10 @@ megasas_resume(struct pci_dev *pdev) goto fail_ready_state; /* Now re-enable MSI-X */ - if (instance->msix_vectors) - pci_enable_msix(instance->pdev, instance->msixentry, - instance->msix_vectors); + if (instance->msix_vectors && + pci_enable_msix(instance->pdev, instance->msixentry, + instance->msix_vectors)) + goto fail_reenable_msix; switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_FUSION: @@ -5243,6 +5244,7 @@ fail_init_mfi: fail_set_dma_mask: fail_ready_state: +fail_reenable_msix: pci_disable_device(pdev); -- cgit From 8ae80ed1734bbe9b2c2021ef1ea981b7d4ccc598 Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Mon, 18 Aug 2014 08:01:44 +0200 Subject: megaraid: Use pci_enable_msix_range() instead of pci_enable_msix() As result of deprecation of MSI-X/MSI enablement functions pci_enable_msix() and pci_enable_msi_block() all drivers using these two interfaces need to be updated to use the new pci_enable_msi_range() or pci_enable_msi_exact() and pci_enable_msix_range() or pci_enable_msix_exact() interfaces. 
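For readers unfamiliar with the newer interface, the shape of the conversion is roughly the sketch below (generic names, not megaraid_sas's own): pci_enable_msix_range() replaces the old allocate-then-retry loop with a single call that returns either the number of vectors granted, anywhere between minvec and maxvec, or a negative errno.

#include <linux/pci.h>

/*
 * Opportunistic form: take however many MSI-X vectors the platform can
 * give (at least one), otherwise report zero so the caller can fall
 * back to MSI or legacy interrupts.
 */
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries, int want)
{
	int i, nvec;

	for (i = 0; i < want; i++)
		entries[i].entry = i;

	nvec = pci_enable_msix_range(pdev, entries, 1, want);
	return nvec < 0 ? 0 : nvec;
}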
Signed-off-by: Alexander Gordeev Acked-by: Kashyap Desai Cc: Neela Syam Kolli Cc: linux-scsi@vger.kernel.org Cc: linux-pci@vger.kernel.org Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas_base.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index e4ffae5346fc..a2f4d4f6515c 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4103,17 +4103,11 @@ static int megasas_init_fw(struct megasas_instance *instance) (unsigned int)num_online_cpus()); for (i = 0; i < instance->msix_vectors; i++) instance->msixentry[i].entry = i; - i = pci_enable_msix(instance->pdev, instance->msixentry, - instance->msix_vectors); - if (i >= 0) { - if (i) { - if (!pci_enable_msix(instance->pdev, - instance->msixentry, i)) - instance->msix_vectors = i; - else - instance->msix_vectors = 0; - } - } else + i = pci_enable_msix_range(instance->pdev, instance->msixentry, + 1, instance->msix_vectors); + if (i) + instance->msix_vectors = i; + else instance->msix_vectors = 0; dev_info(&instance->pdev->dev, "[scsi%d]: FW supports" @@ -5133,8 +5127,8 @@ megasas_resume(struct pci_dev *pdev) /* Now re-enable MSI-X */ if (instance->msix_vectors && - pci_enable_msix(instance->pdev, instance->msixentry, - instance->msix_vectors)) + pci_enable_msix_exact(instance->pdev, instance->msixentry, + instance->msix_vectors)) goto fail_reenable_msix; switch (instance->pdev->device) { -- cgit From 52674c65f9751f607a4ed9d75227a0d8e4f54189 Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Mon, 18 Aug 2014 08:01:45 +0200 Subject: mpt2sas: Use pci_enable_msix_exact() instead of pci_enable_msix() As result of deprecation of MSI-X/MSI enablement functions pci_enable_msix() and pci_enable_msi_block() all drivers using these two interfaces need to be updated to use the new pci_enable_msi_range() or pci_enable_msi_exact() and pci_enable_msix_range() or pci_enable_msix_exact() interfaces. 
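The _exact variant used for mpt2sas below is the all-or-nothing form: it behaves like pci_enable_msix_range() with minvec equal to maxvec and returns 0 on success or a negative errno, which suits drivers that cannot operate with fewer vectors than requested. A brief sketch under those assumptions (generic names, and the caller is assumed to have filled entries[i].entry already):

#include <linux/pci.h>

/* Enable exactly 'nvec' MSI-X vectors or fail without enabling any. */
static int example_enable_msix_exact(struct pci_dev *pdev,
				     struct msix_entry *entries, int nvec)
{
	int r = pci_enable_msix_exact(pdev, entries, nvec);

	if (r)
		return r;	/* negative errno; no vectors were enabled */

	return 0;		/* entries[i].vector now holds the IRQ numbers */
}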
Signed-off-by: Alexander Gordeev Reviewed-by: Tomas Henzl Cc: Nagalakshmi Nandigama Cc: Sreekanth Reddy Cc: support@lsi.com Cc: DL-MPTFusionLinux@lsi.com Cc: linux-scsi@vger.kernel.org Cc: linux-pci@vger.kernel.org Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt2sas/mpt2sas_base.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 2f262be890c5..cf51b48dd7d9 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -1414,10 +1414,10 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc) for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) a->entry = i; - r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count); + r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count); if (r) { - dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix " - "failed (r=%d) !!!\n", ioc->name, r)); + dfailprintk(ioc, printk(MPT2SAS_INFO_FMT + "pci_enable_msix_exact failed (r=%d) !!!\n", ioc->name, r)); kfree(entries); goto try_ioapic; } -- cgit From 6bfa6907046b7d97a460abf95f3f5b82c7e31a1e Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Mon, 18 Aug 2014 08:01:46 +0200 Subject: mpt3sas: Use pci_enable_msix_exact() instead of pci_enable_msix() As result of deprecation of MSI-X/MSI enablement functions pci_enable_msix() and pci_enable_msi_block() all drivers using these two interfaces need to be updated to use the new pci_enable_msi_range() or pci_enable_msi_exact() and pci_enable_msix_range() or pci_enable_msix_exact() interfaces. Signed-off-by: Alexander Gordeev Reviewed-by: Tomas Henzl Cc: Nagalakshmi Nandigama Cc: Sreekanth Reddy Cc: support@lsi.com Cc: DL-MPTFusionLinux@lsi.com Cc: linux-scsi@vger.kernel.org Cc: linux-pci@vger.kernel.org Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt3sas/mpt3sas_base.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 93ce2b2baa41..09e6b21ff4ef 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -1716,10 +1716,10 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) a->entry = i; - r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count); + r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count); if (r) { dfailprintk(ioc, pr_info(MPT3SAS_FMT - "pci_enable_msix failed (r=%d) !!!\n", + "pci_enable_msix_exact failed (r=%d) !!!\n", ioc->name, r)); kfree(entries); goto try_ioapic; -- cgit From 84e32a06f4f8756ce9ec3c8dc7e97896575f0771 Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Mon, 18 Aug 2014 08:01:47 +0200 Subject: qla2xxx: Use pci_enable_msix_range() instead of pci_enable_msix() As result of deprecation of MSI-X/MSI enablement functions pci_enable_msix() and pci_enable_msi_block() all drivers using these two interfaces need to be updated to use the new pci_enable_msi_range() or pci_enable_msi_exact() and pci_enable_msix_range() or pci_enable_msix_exact() interfaces. Log message code 0x00c6 preserved, although it is reported after successful call to pci_enable_msix_range(), not before possibly unsuccessful call to pci_enable_msix(). Consumers of the error code should not notice the difference. 
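The interesting case in the qla2xxx conversion is a partial grant: the driver asks for its full vector count but must keep working if the range call returns fewer, down to a fixed minimum. A sketch of that shape, with hypothetical names standing in for the qla2xxx fields (entries[i].entry is assumed to be pre-filled):

#include <linux/pci.h>

#define EXAMPLE_MIN_MSIX	2	/* e.g. default queue + one response queue */

struct example_hw {
	struct pci_dev *pdev;
	struct msix_entry *entries;
	int msix_count;		/* desired on entry, granted on exit */
	int max_rsp_queues;	/* scales with the granted vector count */
};

static int example_setup_msix(struct example_hw *hw)
{
	int ret = pci_enable_msix_range(hw->pdev, hw->entries,
					EXAMPLE_MIN_MSIX, hw->msix_count);

	if (ret < 0)
		return ret;	/* below the minimum: give up on MSI-X */

	/* A partial grant is fine; shrink everything that depends on it. */
	hw->msix_count = ret;
	hw->max_rsp_queues = hw->msix_count - 1;
	return 0;
}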
Signed-off-by: Alexander Gordeev Acked-by: Chad Dupuis Cc: qla2xxx-upstream@qlogic.com Cc: linux-scsi@vger.kernel.org Cc: linux-pci@vger.kernel.org Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_isr.c | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 550a4a31f51a..a4855785f54a 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2923,27 +2923,22 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) for (i = 0; i < ha->msix_count; i++) entries[i].entry = i; - ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); - if (ret) { - if (ret < MIN_MSIX_COUNT) - goto msix_failed; - + ret = pci_enable_msix_range(ha->pdev, + entries, MIN_MSIX_COUNT, ha->msix_count); + if (ret < 0) { + ql_log(ql_log_fatal, vha, 0x00c7, + "MSI-X: Failed to enable support, " + "giving up -- %d/%d.\n", + ha->msix_count, ret); + goto msix_out; + } else if (ret < ha->msix_count) { ql_log(ql_log_warn, vha, 0x00c6, "MSI-X: Failed to enable support " "-- %d/%d\n Retry with %d vectors.\n", ha->msix_count, ret, ret); - ha->msix_count = ret; - ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); - if (ret) { -msix_failed: - ql_log(ql_log_fatal, vha, 0x00c7, - "MSI-X: Failed to enable support, " - "giving up -- %d/%d.\n", - ha->msix_count, ret); - goto msix_out; - } - ha->max_rsp_queues = ha->msix_count - 1; } + ha->msix_count = ret; + ha->max_rsp_queues = ha->msix_count - 1; ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * ha->msix_count, GFP_KERNEL); if (!ha->msix_entries) { -- cgit From f6e495a2b317fd7f3693d7c9217abfe943cbb3c6 Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Tue, 1 Jul 2014 14:56:20 +0200 Subject: mptfusion: simplify rounding Rounding up to a multiple of 4 should be done using the ALIGN macro. As a bonus, this also makes the generated code smaller. In GetIocFacts(), sz is assigned to a few lines below without being read in the meantime, so it is ok that it doesn't end up with the same value as facts->FWImageSize. Signed-off-by: Rasmus Villemoes Reviewed-by: Joe Lawrence Signed-off-by: Christoph Hellwig --- drivers/message/fusion/mptbase.c | 7 +------ drivers/message/fusion/mptctl.c | 7 +------ 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index a896d948b79e..68f57d3bcca1 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -3172,12 +3172,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason) facts->FWImageSize = le32_to_cpu(facts->FWImageSize); } - sz = facts->FWImageSize; - if ( sz & 0x01 ) - sz += 1; - if ( sz & 0x02 ) - sz += 2; - facts->FWImageSize = sz; + facts->FWImageSize = ALIGN(facts->FWImageSize, 4); if (!facts->RequestFrameSize) { /* Something is wrong! 
*/ diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index b0a892a2bf1b..70bb7530b22c 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -1741,12 +1741,7 @@ mptctl_replace_fw (unsigned long arg) /* Allocate memory for the new FW image */ - newFwSize = karg.newImageSize; - - if (newFwSize & 0x01) - newFwSize += 1; - if (newFwSize & 0x02) - newFwSize += 2; + newFwSize = ALIGN(karg.newImageSize, 4); mpt_alloc_fw_memory(ioc, newFwSize); if (ioc->cached_fw == NULL) -- cgit From 34c5801d81b9ae287010888be55ec98a3026cddd Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Fri, 8 Aug 2014 07:38:08 -0400 Subject: qla2xxx: Move mailbox failure messages to a default debug level. Move the mailbox failure messages to a default debugging level so that benign failures won't flood the system logs but will still show up if default debug messaging is enabled (ql2xextended_error_logging=1). Signed-off-by: Giridhar Malavali Signed-off-by: Chad Dupuis Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_mbx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index d9aafc003be2..a7a373f38ca8 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -373,7 +373,7 @@ premature_exit: mbx_done: if (rval) { - ql_log(ql_log_warn, base_vha, 0x1020, + ql_dbg(ql_dbg_disc, base_vha, 0x1020, "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n", mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command); } else { -- cgit From 8e5a9484aee8d48f7dd3739c139ac684b30e6201 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Fri, 8 Aug 2014 07:38:09 -0400 Subject: qla2xxx: Remove restriction on starting remote device discovery on port update. Limiting which port update events will allow the driver to kick off a name server scan has been problematic in some corner cases so remove the restriction and restore the previous semantic. Also move the link up/down informational messages to the LOOP_UP and LOOP_DOWN events. Signed-off-by: Giridhar Malavali Signed-off-by: Chad Dupuis Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_dbg.c | 2 +- drivers/scsi/qla2xxx/qla_isr.c | 10 ++++------ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index c72ee97bf3f7..41117b3a50f0 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -34,7 +34,7 @@ * | | | 0x5047,0x5052 | * | | | 0x5084,0x5075 | * | | | 0x503d,0x5044 | - * | | | 0x507b | + * | | | 0x507b,0x505f | * | Timer Routines | 0x6012 | | * | User Space Interactions | 0x70e2 | 0x7018,0x702e | * | | | 0x7020,0x7024 | diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index a4855785f54a..bea0e7458ee1 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -730,7 +730,7 @@ skip_rio: else ha->link_data_rate = mb[1]; - ql_dbg(ql_dbg_async, vha, 0x500a, + ql_log(ql_log_info, vha, 0x500a, "LOOP UP detected (%s Gbps).\n", qla2x00_get_link_speed_str(ha, ha->link_data_rate)); @@ -743,7 +743,7 @@ skip_rio: ? RD_REG_WORD(®24->mailbox4) : 0; mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(®82->mailbox_out[4]) : mbx; - ql_dbg(ql_dbg_async, vha, 0x500b, + ql_log(ql_log_info, vha, 0x500b, "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3], mbx); @@ -908,7 +908,8 @@ skip_rio: * it. Otherwise ignore it and Wait for RSCN to come in. 
*/ atomic_set(&vha->loop_down_timer, 0); - if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) { + if (atomic_read(&vha->loop_state) != LOOP_DOWN && + atomic_read(&vha->loop_state) != LOOP_DEAD) { ql_dbg(ql_dbg_async, vha, 0x5011, "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", mb[1], mb[2], mb[3]); @@ -920,9 +921,6 @@ skip_rio: ql_dbg(ql_dbg_async, vha, 0x5012, "Port database changed %04x %04x %04x.\n", mb[1], mb[2], mb[3]); - ql_log(ql_log_warn, vha, 0x505f, - "Link is operational (%s Gbps).\n", - qla2x00_get_link_speed_str(ha, ha->link_data_rate)); /* * Mark all devices as missing so we will login again. -- cgit From fc3850458c176a3cf925771ec822f9537d1dbbc6 Mon Sep 17 00:00:00 2001 From: Hans Wennborg Date: Tue, 5 Aug 2014 21:43:29 -0700 Subject: scsi: fix decimal printf format specifiers prefixed with 0x The prefix suggests the number should be printed in hex, so use the %x specifier to do that. Found by using regex suggested by Joe Perches. Signed-off-by: Hans Wennborg Reviewed-by: Hannes Reinecke Signed-off-by: Christoph Hellwig --- drivers/scsi/cxgbi/libcxgbi.c | 2 +- drivers/scsi/nsp32.c | 2 +- drivers/scsi/qla2xxx/qla_target.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index d65df6dc106f..4cc9787f55bd 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -1807,7 +1807,7 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) u32 credits; log_debug(1 << CXGBI_DBG_PDU_RX, - "csk 0x%p,%u,0x%lu,%u, seq %u, wup %u, thre %u, %u.\n", + "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n", csk, csk->state, csk->flags, csk->tid, csk->copied_seq, csk->rcv_wup, cdev->rx_credit_thres, cdev->rcv_win); diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index 50b086aef178..198f75438496 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c @@ -915,7 +915,7 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct s int ret; nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, - "enter. target: 0x%x LUN: 0x%llu cmnd: 0x%x cmndlen: 0x%x " + "enter. target: 0x%x LUN: 0x%llx cmnd: 0x%x cmndlen: 0x%x " "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x", SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len, scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt)); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index e632e14180cf..aebe62c9246d 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -2503,7 +2503,7 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, "iocb(s) %p Returned STATUS.\n", sts); ql_dbg(ql_dbg_tgt, vha, 0xf075, - "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", + "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); -- cgit From 39033413e22aa64944baf21b29e134d52be67038 Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Fri, 8 Aug 2014 00:59:58 -0400 Subject: be2iscsi: Fix the sparse warning introduced in previous submission commit 73af08e11c6638e2abd6b1fa13cdab58c2bbdbf8 Author: Jayamohan Kallickal Date: Mon May 5 21:41:26 2014 -0400 be2iscsi: Fix interrupt Coalescing mechanism. 
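The switch from u32 to __le32 in the be2iscsi structures below is not cosmetic: it lets sparse verify that these DMA-visible, little-endian fields are only accessed through the byte-order helpers, which also keeps the layout correct on big-endian hosts. A small self-contained sketch of the pattern (the structure here is a hypothetical example, not the driver's be_sge):

#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>

struct example_sge {
	__le32 pa_lo;
	__le32 pa_hi;
	__le32 len;
};

static void example_sge_fill(struct example_sge *sge, u64 dma_addr, u32 len)
{
	/*
	 * Assigning a plain cpu-endian value to an __le32 field makes
	 * sparse warn; the helpers document and perform the conversion.
	 */
	sge->pa_lo = cpu_to_le32(lower_32_bits(dma_addr));
	sge->pa_hi = cpu_to_le32(upper_32_bits(dma_addr));
	sge->len   = cpu_to_le32(len);
}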
Signed-off-by: John Soni Jose Signed-off-by: Jayamohan Kallickal Reviewed-by: Mike Christie Signed-off-by: Christoph Hellwig --- drivers/scsi/be2iscsi/be_cmds.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index cc7405c0eca0..4e8cb61b3685 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -26,9 +26,9 @@ * The commands are serviced by the ARM processor in the OneConnect's MPU. */ struct be_sge { - u32 pa_lo; - u32 pa_hi; - u32 len; + __le32 pa_lo; + __le32 pa_hi; + __le32 len; }; #define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */ @@ -624,11 +624,11 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) /******************** Modify EQ Delay *******************/ struct be_cmd_req_modify_eq_delay { struct be_cmd_req_hdr hdr; - u32 num_eq; + __le32 num_eq; struct { - u32 eq_id; - u32 phase; - u32 delay_multiplier; + __le32 eq_id; + __le32 phase; + __le32 delay_multiplier; } delay[MAX_CPUS]; } __packed; -- cgit From 65c5efa8166d146c088bef6e004f827c1070d826 Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Fri, 8 Aug 2014 00:59:59 -0400 Subject: be2iscsi: Fix the copyright year Change the copyright year to 2014 Signed-off-by: John Soni Jose Signed-off-by: Jayamohan Kallickal Reviewed-by: Mike Christie Signed-off-by: Christoph Hellwig --- drivers/scsi/be2iscsi/be.h | 2 +- drivers/scsi/be2iscsi/be_cmds.c | 2 +- drivers/scsi/be2iscsi/be_cmds.h | 2 +- drivers/scsi/be2iscsi/be_iscsi.c | 2 +- drivers/scsi/be2iscsi/be_iscsi.h | 2 +- drivers/scsi/be2iscsi/be_main.c | 2 +- drivers/scsi/be2iscsi/be_main.h | 2 +- drivers/scsi/be2iscsi/be_mgmt.c | 2 +- drivers/scsi/be2iscsi/be_mgmt.h | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h index 860f527d8f26..81e83a65a193 100644 --- a/drivers/scsi/be2iscsi/be.h +++ b/drivers/scsi/be2iscsi/be.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index 1432ed5e9fc6..ea4477fe27ab 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index 4e8cb61b3685..ccda0b6a7324 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 86162811812d..e25203ef1138 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h index 31ddc8494398..e0b3b2d1f27a 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.h +++ b/drivers/scsi/be2iscsi/be_iscsi.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 915c26b23ab6..95634150d21c 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 9ceab426eec9..0ca9d2d5397a 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 665afcb74a56..8478506739fb 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index 24a8fc577477..bd81446936fc 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or -- cgit From a3d313ea56fada1c73be022140b8d2b14ff1fc7b Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Fri, 8 Aug 2014 01:00:00 -0400 Subject: be2iscsi: Fix updating the boot enteries in sysfs During port async event driver should check if there is any boot target configured on the adapter. Update sysfs enteries with the boot target parameters. 
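The mechanism behind this fix is a deferral: the MCC completion handler only records that a boot-related async event arrived, and the periodic health-check work item later re-runs the (sleepable) boot-info setup. A compact sketch of that shape, using atomic bit helpers and hypothetical names rather than the driver's plain state word:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#define EXAMPLE_CHECK_BOOT	0	/* bit number in ->flags */

struct example_adapter {
	unsigned long flags;
	struct delayed_work health_work;
};

/* Runs in the (atomic) completion path: just remember the event. */
static void example_note_boot_event(struct example_adapter *a)
{
	set_bit(EXAMPLE_CHECK_BOOT, &a->flags);
}

/* Runs later in process context from the periodic health check. */
static void example_health_work(struct work_struct *work)
{
	struct example_adapter *a = container_of(to_delayed_work(work),
						 struct example_adapter,
						 health_work);

	if (test_and_clear_bit(EXAMPLE_CHECK_BOOT, &a->flags)) {
		/* Safe to sleep here: refresh the sysfs boot entries. */
	}
}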
Signed-off-by: Minh Tran Signed-off-by: John Soni Jose Signed-off-by: Jayamohan Kallickal Reviewed-by: Mike Christie Signed-off-by: Christoph Hellwig --- drivers/scsi/be2iscsi/be_cmds.c | 38 ++++++++++++++++++++++++++++++++++++-- drivers/scsi/be2iscsi/be_cmds.h | 8 ++++++++ drivers/scsi/be2iscsi/be_main.c | 17 +++++++++++++++++ drivers/scsi/be2iscsi/be_main.h | 1 + 4 files changed, 62 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index ea4477fe27ab..80d97f3d2ed9 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -275,6 +275,19 @@ bool is_link_state_evt(u32 trailer) ASYNC_EVENT_CODE_LINK_STATE); } +static bool is_iscsi_evt(u32 trailer) +{ + return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & + ASYNC_TRAILER_EVENT_CODE_MASK) == + ASYNC_EVENT_CODE_ISCSI; +} + +static int iscsi_evt_type(u32 trailer) +{ + return (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) & + ASYNC_TRAILER_EVENT_TYPE_MASK; +} + static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) { if (compl->flags != 0) { @@ -438,7 +451,7 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba, } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) || ((evt->port_link_status & ASYNC_EVENT_LOGICAL) && (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) { - phba->state = BE_ADAPTER_LINK_UP; + phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT; beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, @@ -461,7 +474,28 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba) /* Interpret compl as a async link evt */ beiscsi_async_link_state_process(phba, (struct be_async_event_link_state *) compl); - else + else if (is_iscsi_evt(compl->flags)) { + switch (iscsi_evt_type(compl->flags)) { + case ASYNC_EVENT_NEW_ISCSI_TGT_DISC: + case ASYNC_EVENT_NEW_ISCSI_CONN: + case ASYNC_EVENT_NEW_TCP_CONN: + phba->state |= BE_ADAPTER_CHECK_BOOT; + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_CONFIG | + BEISCSI_LOG_MBOX, + "BC_%d : Async iscsi Event," + " flags handled = 0x%08x\n", + compl->flags); + break; + default: + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_CONFIG | + BEISCSI_LOG_MBOX, + "BC_%d : Unsupported Async" + " Event, flags = 0x%08x\n", + compl->flags); + } + } else beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index ccda0b6a7324..98897434bcb4 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -118,6 +118,14 @@ struct be_mcc_compl { #define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */ #define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF #define ASYNC_EVENT_CODE_LINK_STATE 0x1 +#define ASYNC_EVENT_CODE_ISCSI 0x4 + +#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16 /* bits 16 - 23 */ +#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xF +#define ASYNC_EVENT_NEW_ISCSI_TGT_DISC 0x4 +#define ASYNC_EVENT_NEW_ISCSI_CONN 0x5 +#define ASYNC_EVENT_NEW_TCP_CONN 0x7 + struct be_async_event_trailer { u32 code; }; diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 95634150d21c..0762b8e44f95 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -4377,6 +4377,10 @@ static int beiscsi_setup_boot_info(struct beiscsi_hba *phba) { struct iscsi_boot_kobj *boot_kobj; + /* it has been created previously */ + if (phba->boot_kset) + return 0; + /* get boot info using mgmt cmd */ if (beiscsi_get_boot_info(phba)) /* Try to see if we can carry on without this */ @@ -5335,6 
+5339,14 @@ static void be_eqd_update(struct beiscsi_hba *phba) } } +static void be_check_boot_session(struct beiscsi_hba *phba) +{ + if (beiscsi_setup_boot_info(phba)) + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Could not set up " + "iSCSI boot info on async event.\n"); +} + /* * beiscsi_hw_health_check()- Check adapter health * @work: work item to check HW health @@ -5350,6 +5362,11 @@ beiscsi_hw_health_check(struct work_struct *work) be_eqd_update(phba); + if (phba->state & BE_ADAPTER_CHECK_BOOT) { + phba->state &= ~BE_ADAPTER_CHECK_BOOT; + be_check_boot_session(phba); + } + beiscsi_ue_detect(phba); schedule_delayed_work(&phba->beiscsi_hw_check_task, diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 0ca9d2d5397a..1e3428a4dbfb 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -104,6 +104,7 @@ #define BE_ADAPTER_LINK_DOWN 0x002 #define BE_ADAPTER_PCI_ERR 0x004 #define BE_ADAPTER_STATE_SHUTDOWN 0x008 +#define BE_ADAPTER_CHECK_BOOT 0x010 #define BEISCSI_CLEAN_UNLOAD 0x01 -- cgit From b7ab35b13379e709a2a1c3f1b3a59e5db62ce4e3 Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Fri, 8 Aug 2014 01:00:01 -0400 Subject: be2iscsi: Fix processing CQE before connection resources are freed Driver should process the completion queue entries before a connection resources are freed. While running mixed traffic due to latency, driver processes the CQE after the connection resources are freed. This fix processes all the completion queue before the connection resources are freed. Signed-off-by: John Soni Jose Signed-off-by: Jayamohan Kallickal Reviewed-by: Mike Christie Signed-off-by: Christoph Hellwig --- drivers/scsi/be2iscsi/be_iscsi.c | 29 +++++++++++++++++++++++++++++ drivers/scsi/be2iscsi/be_main.c | 15 ++++++++++++++- drivers/scsi/be2iscsi/be_main.h | 3 +++ 3 files changed, 46 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index e25203ef1138..b7391a3f9f0b 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -1273,6 +1273,31 @@ int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) return 0; } +/** + * beiscsi_flush_cq()- Flush the CQ created. + * @phba: ptr device priv structure. + * + * Before the connection resource are freed flush + * all the CQ enteries + **/ +static void beiscsi_flush_cq(struct beiscsi_hba *phba) +{ + uint16_t i; + struct be_eq_obj *pbe_eq; + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + blk_iopoll_disable(&pbe_eq->iopoll); + beiscsi_process_cq(pbe_eq); + blk_iopoll_enable(&pbe_eq->iopoll); + } +} + /** * beiscsi_close_conn - Upload the connection * @ep: The iscsi endpoint @@ -1294,6 +1319,10 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag) } ret = beiscsi_mccq_compl(phba, tag, NULL, NULL); + + /* Flush the CQ entries */ + beiscsi_flush_cq(phba); + return ret; } diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 0762b8e44f95..c44dce2aa1f2 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -2068,7 +2068,7 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba) * return * Number of Completion Entries processed. 
**/ -static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) +unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) { struct be_queue_info *cq; struct sol_cqe *sol; @@ -2110,6 +2110,18 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) cri_index = BE_GET_CRI_FROM_CID(cid); ep = phba->ep_array[cri_index]; + + if (ep == NULL) { + /* connection has already been freed + * just move on to next one + */ + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : proc cqe of disconn ep: cid %d\n", + cid); + goto proc_next_cqe; + } + beiscsi_ep = ep->dd_data; beiscsi_conn = beiscsi_ep->conn; @@ -2219,6 +2231,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) break; } +proc_next_cqe: AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0); queue_tail_inc(cq); sol = queue_tail_node(cq); diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 1e3428a4dbfb..5f8b0fcbe22a 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -840,6 +840,9 @@ void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, void hwi_ring_cq_db(struct beiscsi_hba *phba, unsigned int id, unsigned int num_processed, unsigned char rearm, unsigned char event); + +unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq); + static inline bool beiscsi_error(struct beiscsi_hba *phba) { return phba->ue_detected || phba->fw_timeout; -- cgit From a07b67194307cc344953ce23c28d840609acac1f Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Fri, 8 Aug 2014 01:00:02 -0400 Subject: be2iscsi: Bump the driver version Bump the driver version Signed-off-by: John Soni Jose Signed-off-by: Jayamohan Kallickal Reviewed-by: Mike Christie Signed-off-by: Christoph Hellwig --- drivers/scsi/be2iscsi/be_main.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 5f8b0fcbe22a..36056311a6e4 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -36,7 +36,7 @@ #include #define DRV_NAME "be2iscsi" -#define BUILD_STR "10.2.273.0" +#define BUILD_STR "10.4.74.0" #define BE_NAME "Emulex OneConnect" \ "Open-iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" -- cgit From 9e0328453ea1a0e800b10daafffbadeac68273fd Mon Sep 17 00:00:00 2001 From: Suresh Thiagarajan Date: Mon, 11 Aug 2014 11:50:35 +0530 Subject: pm8001: Update nvmd response data to request buffer Instead of using the virt_ptr use request buffer for copying back the nvmd response data and use the same in request function also Signed-off-by: Suresh Thiagarajan Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/pm8001/pm8001_ctl.c | 4 +--- drivers/scsi/pm8001/pm8001_hwi.c | 8 ++++++++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 7abbf284da1a..be8269c8d127 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -385,7 +385,6 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; char *str = buf; - void *virt_addr; int bios_index; DECLARE_COMPLETION_ONSTACK(completion); struct pm8001_ioctl_payload payload; @@ -402,11 +401,10 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev, return -ENOMEM; } wait_for_completion(&completion); - virt_addr = 
pm8001_ha->memoryMap.region[NVMD].virt_ptr; for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT; bios_index++) str += sprintf(str, "%c", - *((u8 *)((u8 *)virt_addr+bios_index))); + *(payload.func_specific+bios_index)); kfree(payload.func_specific); return str - buf; } diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index dd12c6fe57a6..933f21471951 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -3132,6 +3132,7 @@ void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { + struct fw_control_ex *fw_control_context; struct get_nvm_data_resp *pPayload = (struct get_nvm_data_resp *)(piomb + 4); u32 tag = le32_to_cpu(pPayload->tag); @@ -3140,6 +3141,7 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 ir_tds_bn_dps_das_nvm = le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm); void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; + fw_control_context = ccb->fw_control_context; PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n")); if ((dlen_status & NVMD_STAT) != 0) { @@ -3180,6 +3182,12 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n", (dlen_status & NVMD_LEN) >> 24)); } + /* Though fw_control_context is freed below, usrAddr still needs + * to be updated as this holds the response to the request function + */ + memcpy(fw_control_context->usrAddr, + pm8001_ha->memoryMap.region[NVMD].virt_ptr, + fw_control_context->len); kfree(ccb->fw_control_context); ccb->task = NULL; ccb->ccb_tag = 0xFFFFFFFF; -- cgit From 132aa220b45d60e9b20def1e9d8be9422eed9616 Mon Sep 17 00:00:00 2001 From: Tomas Henzl Date: Thu, 14 Aug 2014 16:12:39 +0200 Subject: hpsa: refine the pci enable/disable handling When a second(kdump) kernel starts and the hard reset method is used the driver calls pci_disable_device without previously enabling it, so the kernel shows a warning - [ 16.876248] WARNING: at drivers/pci/pci.c:1431 pci_disable_device+0x84/0x90() [ 16.882686] Device hpsa disabling already-disabled device ... This patch fixes it, in addition to this I tried to balance also some other pairs of enable/disable device in the driver. Unfortunately I wasn't able to verify the functionality for the case of a sw reset, because of a lack of proper hw. Signed-off-by: Tomas Henzl Reviewed-by: Stephen M. Cameron Signed-off-by: Christoph Hellwig --- drivers/scsi/hpsa.c | 42 ++++++++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 8c17c432dcd5..7828834e212b 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -5971,10 +5971,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) /* Save the PCI command register */ pci_read_config_word(pdev, 4, &command_register); - /* Turn the board off. This is so that later pci_restore_state() - * won't turn the board on before the rest of config space is ready. 
- */ - pci_disable_device(pdev); pci_save_state(pdev); /* find the first memory BAR, so we can find the cfg table */ @@ -6022,11 +6018,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) goto unmap_cfgtable; pci_restore_state(pdev); - rc = pci_enable_device(pdev); - if (rc) { - dev_warn(&pdev->dev, "failed to enable device.\n"); - goto unmap_cfgtable; - } pci_write_config_word(pdev, 4, command_register); /* Some devices (notably the HP Smart Array 5i Controller) @@ -6537,6 +6528,23 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev) if (!reset_devices) return 0; + /* kdump kernel is loading, we don't know in which state is + * the pci interface. The dev->enable_cnt is equal zero + * so we call enable+disable, wait a while and switch it on. + */ + rc = pci_enable_device(pdev); + if (rc) { + dev_warn(&pdev->dev, "Failed to enable PCI device\n"); + return -ENODEV; + } + pci_disable_device(pdev); + msleep(260); /* a randomly chosen number */ + rc = pci_enable_device(pdev); + if (rc) { + dev_warn(&pdev->dev, "failed to enable device.\n"); + return -ENODEV; + } + /* Reset the controller with a PCI power-cycle or via doorbell */ rc = hpsa_kdump_hard_reset_controller(pdev); @@ -6545,10 +6553,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev) * "performant mode". Or, it might be 640x, which can't reset * due to concerns about shared bbwc between 6402/6404 pair. */ - if (rc == -ENOTSUPP) - return rc; /* just try to do the kdump anyhow. */ - if (rc) - return -ENODEV; + if (rc) { + if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */ + rc = -ENODEV; + goto out_disable; + } /* Now try to get the controller to respond to a no-op */ dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); @@ -6559,7 +6568,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev) dev_warn(&pdev->dev, "no-op failed%s\n", (i < 11 ? "; re-trying" : "")); } - return 0; + +out_disable: + + pci_disable_device(pdev); + return rc; } static int hpsa_allocate_cmd_pool(struct ctlr_info *h) @@ -6739,6 +6752,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) iounmap(h->transtable); if (h->cfgtable) iounmap(h->cfgtable); + pci_disable_device(h->pdev); pci_release_regions(h->pdev); kfree(h); } -- cgit From 5d46ad7d88a78cca36ff6472a5bdc7d1c9c22654 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 16 Aug 2014 14:15:15 -0700 Subject: fusion: fix excess parameter kernel-doc warning Fix kernel-doc excess parameter warning: Warning(..//drivers/message/fusion/mptbase.c:1411): Excess function parameter 'prod_name' description in 'mpt_get_product_name' Signed-off-by: Randy Dunlap Reviewed-by: Ewan D. 
Milne Signed-off-by: Christoph Hellwig --- drivers/message/fusion/mptbase.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 68f57d3bcca1..187f83629f7e 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -1400,7 +1400,6 @@ mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp) * @vendor: pci vendor id * @device: pci device id * @revision: pci revision id - * @prod_name: string returned * * Returns product string displayed when driver loads, * in /proc/mpt/summary and /sysfs/class/scsi_host/host/version_product -- cgit From 01123ef4c3fc9b9ff3062df2e10dee9b139b46b4 Mon Sep 17 00:00:00 2001 From: Douglas Gilbert Date: Tue, 5 Aug 2014 12:20:02 +0200 Subject: scsi_debug: scsi_cmnd->cmnd check and casts unnecessary This patch removes a NULL check for the scsi_cmnd::cmnd pointer since many other instances in this driver and elsewhere assume it is valid. Also redundant casts to 'unsigned char *' are removed as the pointer has that type. Signed-off-by: Douglas Gilbert Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/scsi_debug.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index d19c0e3c7f48..07d224aa9b47 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -929,7 +929,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, int target, { unsigned char pq_pdt; unsigned char * arr; - unsigned char *cmd = (unsigned char *)scp->cmnd; + unsigned char *cmd = scp->cmnd; int alloc_len, n, ret; alloc_len = (cmd[3] << 8) + cmd[4]; @@ -1075,7 +1075,7 @@ static int resp_requests(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { unsigned char * sbuff; - unsigned char *cmd = (unsigned char *)scp->cmnd; + unsigned char *cmd = scp->cmnd; unsigned char arr[SCSI_SENSE_BUFFERSIZE]; int want_dsense; int len = 18; @@ -1115,7 +1115,7 @@ static int resp_requests(struct scsi_cmnd * scp, static int resp_start_stop(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { - unsigned char *cmd = (unsigned char *)scp->cmnd; + unsigned char *cmd = scp->cmnd; int power_cond, errsts, start; errsts = check_readiness(scp, UAS_ONLY, devip); @@ -1177,7 +1177,7 @@ static int resp_readcap(struct scsi_cmnd * scp, static int resp_readcap16(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { - unsigned char *cmd = (unsigned char *)scp->cmnd; + unsigned char *cmd = scp->cmnd; unsigned char arr[SDEBUG_READCAP16_ARR_SZ]; unsigned long long capac; int errsts, k, alloc_len; @@ -1222,7 +1222,7 @@ static int resp_readcap16(struct scsi_cmnd * scp, static int resp_report_tgtpgs(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { - unsigned char *cmd = (unsigned char *)scp->cmnd; + unsigned char *cmd = scp->cmnd; unsigned char * arr; int host_no = devip->sdbg_host->shost->host_no; int n, ret, alen, rlen; @@ -1468,7 +1468,7 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target, int k, alloc_len, msense_6, offset, len, errsts, target_dev_id; unsigned char * ap; unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; - unsigned char *cmd = (unsigned char *)scp->cmnd; + unsigned char *cmd = scp->cmnd; errsts = check_readiness(scp, UAS_ONLY, devip); if (errsts) @@ -1630,7 +1630,7 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6, int pf, sp, ps, md_len, bd_len, off, spf, pg_len; int param_len, res, errsts, mpage; unsigned char arr[SDEBUG_MAX_MSELECT_SZ]; - unsigned 
char *cmd = (unsigned char *)scp->cmnd; + unsigned char *cmd = scp->cmnd; errsts = check_readiness(scp, UAS_ONLY, devip); if (errsts) @@ -1739,7 +1739,7 @@ static int resp_log_sense(struct scsi_cmnd * scp, { int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n; unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; - unsigned char *cmd = (unsigned char *)scp->cmnd; + unsigned char *cmd = scp->cmnd; errsts = check_readiness(scp, UAS_ONLY, devip); if (errsts) @@ -2414,7 +2414,7 @@ static int resp_report_luns(struct scsi_cmnd * scp, unsigned int alloc_len; int lun_cnt, i, upper, num, n; u64 wlun, lun; - unsigned char *cmd = (unsigned char *)scp->cmnd; + unsigned char *cmd = scp->cmnd; int select_report = (int)cmd[2]; struct scsi_lun *one_lun; unsigned char arr[SDEBUG_RLUN_ARR_SZ]; @@ -4085,7 +4085,7 @@ static void sdebug_remove_adapter(void) static int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt) { - unsigned char *cmd = (unsigned char *) SCpnt->cmnd; + unsigned char *cmd = SCpnt->cmnd; int len, k; unsigned int num; unsigned long long lba; @@ -4103,7 +4103,7 @@ scsi_debug_queuecommand(struct scsi_cmnd *SCpnt) scsi_set_resid(SCpnt, 0); if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && - !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts) && cmd) { + !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) { char b[120]; int n; -- cgit From cd62b7dae245dd3bb3a21eaadcf01d93ec4fcc7c Mon Sep 17 00:00:00 2001 From: Douglas Gilbert Date: Tue, 5 Aug 2014 12:20:46 +0200 Subject: scsi_debug: give unit attention and other errors precedence over TSF Give existing errors priority over the generation of Task Set Full (TSF) errors. So that max_queue is not exceeded, existing errors may be sent back in the invocation thread. This is done so errors like Unit Attentions are not hidden and lost by either max_queue exceeded or real/injected TSFs. Signed-off-by: Douglas Gilbert Reviewed-by: Martin K. 
Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/scsi_debug.c | 67 +++++++++++++++++++++++------------------------ 1 file changed, 33 insertions(+), 34 deletions(-) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 07d224aa9b47..693f2fa327ee 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -3006,7 +3006,7 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, int scsi_result, int delta_jiff) { unsigned long iflags; - int k, num_in_q, tsf, qdepth, inject; + int k, num_in_q, qdepth, inject; struct sdebug_queued_cmd *sqcp = NULL; struct scsi_device *sdp = cmnd->device; @@ -3019,55 +3019,48 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n", __func__, scsi_result); - if (delta_jiff == 0) { - /* using same thread to call back mid-layer */ - cmnd->result = scsi_result; - cmnd->scsi_done(cmnd); - return 0; - } + if (delta_jiff == 0) + goto respond_in_thread; - /* deferred response cases */ + /* schedule the response at a later time if resources permit */ spin_lock_irqsave(&queued_arr_lock, iflags); num_in_q = atomic_read(&devip->num_in_q); qdepth = cmnd->device->queue_depth; - k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue); - tsf = 0; inject = 0; - if ((qdepth > 0) && (num_in_q >= qdepth)) - tsf = 1; - else if ((scsi_debug_every_nth != 0) && - (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts)) { + if ((qdepth > 0) && (num_in_q >= qdepth)) { + if (scsi_result) { + spin_unlock_irqrestore(&queued_arr_lock, iflags); + goto respond_in_thread; + } else + scsi_result = device_qfull_result; + } else if ((scsi_debug_every_nth != 0) && + (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) && + (scsi_result == 0)) { if ((num_in_q == (qdepth - 1)) && (atomic_inc_return(&sdebug_a_tsf) >= abs(scsi_debug_every_nth))) { atomic_set(&sdebug_a_tsf, 0); inject = 1; - tsf = 1; + scsi_result = device_qfull_result; } } - /* if (tsf) simulate device reporting SCSI status of TASK SET FULL. - * Might override existing CHECK CONDITION. */ - if (tsf) - scsi_result = device_qfull_result; + k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue); if (k >= scsi_debug_max_queue) { - if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts) - tsf = 1; spin_unlock_irqrestore(&queued_arr_lock, iflags); + if (scsi_result) + goto respond_in_thread; + else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts) + scsi_result = device_qfull_result; if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) sdev_printk(KERN_INFO, sdp, - "%s: num_in_q=%d, bypass q, %s%s\n", - __func__, num_in_q, - (inject ? " " : ""), - (tsf ? "status: TASK SET FULL" : - "report: host busy")); - if (tsf) { - /* queued_arr full so respond in same thread */ - cmnd->result = scsi_result; - cmnd->scsi_done(cmnd); - /* As scsi_done() is called "inline" must return 0 */ - return 0; - } else + "%s: max_queue=%d exceeded, %s\n", + __func__, scsi_debug_max_queue, + (scsi_result ? 
"status: TASK SET FULL" : + "report: host busy")); + if (scsi_result) + goto respond_in_thread; + else return SCSI_MLQUEUE_HOST_BUSY; } __set_bit(k, queued_in_use_bm); @@ -3117,12 +3110,18 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, else tasklet_schedule(sqcp->tletp); } - if (tsf && (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)) + if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) && + (scsi_result == device_qfull_result)) sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__, num_in_q, (inject ? " " : ""), "status: TASK SET FULL"); return 0; + +respond_in_thread: /* call back to mid-layer using invocation thread */ + cmnd->result = scsi_result; + cmnd->scsi_done(cmnd); + return 0; } /* Note: The following macros create attribute files in the -- cgit From e46b0344be9b50e8254ddd74e3c5b439d5fca3ce Mon Sep 17 00:00:00 2001 From: Douglas Gilbert Date: Tue, 5 Aug 2014 12:21:53 +0200 Subject: scsi_debug: bump inquiry version to SPC-4, update version descriptors Since a lot of functionality from SPC-4 is supported by this driver (e.g. LBP and PI) then bump the default INQUIRY version from SPC-3 to SPC-4. Also update the INQUIRY version descriptors. Signed-off-by: Douglas Gilbert Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/scsi_debug.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 693f2fa327ee..1c475e9a46ab 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -123,7 +123,7 @@ static const char *scsi_debug_version_date = "20140706"; #define DEF_PHYSBLK_EXP 0 #define DEF_PTYPE 0 #define DEF_REMOVABLE false -#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */ +#define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */ #define DEF_SECTOR_SIZE 512 #define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */ #define DEF_UNMAP_ALIGNMENT 0 @@ -1056,15 +1056,15 @@ static int resp_inquiry(struct scsi_cmnd *scp, int target, memcpy(&arr[16], inq_product_id, 16); memcpy(&arr[32], inq_product_rev, 4); /* version descriptors (2 bytes each) follow */ - arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */ - arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */ + arr[58] = 0x0; arr[59] = 0xa2; /* SAM-5 rev 4 */ + arr[60] = 0x4; arr[61] = 0x68; /* SPC-4 rev 37 */ n = 62; if (scsi_debug_ptype == 0) { - arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */ + arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */ } else if (scsi_debug_ptype == 1) { - arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */ + arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */ } - arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */ + arr[n++] = 0x20; arr[n++] = 0xe6; /* SPL-3 rev 7 */ ret = fill_from_dev_buffer(scp, arr, min(alloc_len, SDEBUG_LONG_INQ_SZ)); kfree(arr); @@ -3205,7 +3205,7 @@ MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... 
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); MODULE_PARM_DESC(removable, "claim to have removable media (def=0)"); -MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); +MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])"); MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); -- cgit From 4cbfea8865825ad9f38beb627b50b308eb2d92be Mon Sep 17 00:00:00 2001 From: Adam Radford Date: Wed, 9 Jul 2014 15:17:56 -0700 Subject: megaraid_sas: Fix LD/VF affiliation parsing The following patch for megaraid_sas fixes the LD/VF affiliation policy parsing code to account for LD targetId's and Hidden LD's (not yet affiliated with any Virtual Functions). This also breaks megasas_get_ld_vf_affiliation() into 2 separate functions: megasas_get_ld_vf_affiliation_111() and megasas_get_ld_Vf_affiliation_12() to reduce indentation levels. Signed-off-by: Adam Radford Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas.h | 1 + drivers/scsi/megaraid/megaraid_sas_base.c | 318 +++++++++++++++++++----------- 2 files changed, 208 insertions(+), 111 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 32166c2c7854..2e4b808b5686 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1661,6 +1661,7 @@ struct MR_LD_VF_AFFILIATION { /* Plasma 1.11 FW backward compatibility structures */ #define IOV_111_OFFSET 0x7CE #define MAX_VIRTUAL_FUNCTIONS 8 +#define MR_LD_ACCESS_HIDDEN 15 struct IOV_111 { u8 maxVFsSupported; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index a2f4d4f6515c..24e4116d2f4b 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1825,16 +1825,12 @@ void megasas_do_ocr(struct megasas_instance *instance) process_fw_state_change_wq(&instance->work_init); } -/* This function will get the current SR-IOV LD/VF affiliation */ -static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, - int initial) +static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, + int initial) { struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; - struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; - struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; - dma_addr_t new_affiliation_h; dma_addr_t new_affiliation_111_h; int ld, retval = 0; u8 thisVf; @@ -1842,15 +1838,15 @@ static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, cmd = megasas_get_cmd(instance); if (!cmd) { - printk(KERN_DEBUG "megasas: megasas_get_ld_vf_" - "affiliation: Failed to get cmd for scsi%d.\n", + printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation_111:" + "Failed to get cmd for scsi%d.\n", instance->host->host_no); return -ENOMEM; } dcmd = &cmd->frame->dcmd; - if (!instance->vf_affiliation && !instance->vf_affiliation_111) { + if (!instance->vf_affiliation_111) { printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF " "affiliation for scsi%d.\n", instance->host->host_no); megasas_return_cmd(instance, cmd); @@ -1858,38 +1854,22 @@ static int megasas_get_ld_vf_affiliation(struct 
megasas_instance *instance, } if (initial) - if (instance->PlasmaFW111) memset(instance->vf_affiliation_111, 0, sizeof(struct MR_LD_VF_AFFILIATION_111)); - else - memset(instance->vf_affiliation, 0, - (MAX_LOGICAL_DRIVES + 1) * - sizeof(struct MR_LD_VF_AFFILIATION)); else { - if (instance->PlasmaFW111) - new_affiliation_111 = - pci_alloc_consistent(instance->pdev, - sizeof(struct MR_LD_VF_AFFILIATION_111), - &new_affiliation_111_h); - else - new_affiliation = - pci_alloc_consistent(instance->pdev, - (MAX_LOGICAL_DRIVES + 1) * - sizeof(struct MR_LD_VF_AFFILIATION), - &new_affiliation_h); - if (!new_affiliation && !new_affiliation_111) { + new_affiliation_111 = + pci_alloc_consistent(instance->pdev, + sizeof(struct MR_LD_VF_AFFILIATION_111), + &new_affiliation_111_h); + if (!new_affiliation_111) { printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate " "memory for new affiliation for scsi%d.\n", - instance->host->host_no); + instance->host->host_no); megasas_return_cmd(instance, cmd); return -ENOMEM; } - if (instance->PlasmaFW111) - memset(new_affiliation_111, 0, - sizeof(struct MR_LD_VF_AFFILIATION_111)); - else - memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * - sizeof(struct MR_LD_VF_AFFILIATION)); + memset(new_affiliation_111, 0, + sizeof(struct MR_LD_VF_AFFILIATION_111)); } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); @@ -1900,34 +1880,17 @@ static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, dcmd->flags = MFI_FRAME_DIR_BOTH; dcmd->timeout = 0; dcmd->pad_0 = 0; - if (instance->PlasmaFW111) { - dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111); - dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111; - } else { - dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) * - sizeof(struct MR_LD_VF_AFFILIATION); - dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS; - } + dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111); + dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111; - if (initial) { - if (instance->PlasmaFW111) - dcmd->sgl.sge32[0].phys_addr = - instance->vf_affiliation_111_h; - else - dcmd->sgl.sge32[0].phys_addr = - instance->vf_affiliation_h; - } else { - if (instance->PlasmaFW111) - dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h; - else - dcmd->sgl.sge32[0].phys_addr = new_affiliation_h; - } - if (instance->PlasmaFW111) - dcmd->sgl.sge32[0].length = - sizeof(struct MR_LD_VF_AFFILIATION_111); + if (initial) + dcmd->sgl.sge32[0].phys_addr = + instance->vf_affiliation_111_h; else - dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) * - sizeof(struct MR_LD_VF_AFFILIATION); + dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h; + + dcmd->sgl.sge32[0].length = + sizeof(struct MR_LD_VF_AFFILIATION_111); printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for " "scsi%d\n", instance->host->host_no); @@ -1943,80 +1906,213 @@ static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, } if (!initial) { - if (instance->PlasmaFW111) { - if (!new_affiliation_111->vdCount) { - printk(KERN_WARNING "megasas: SR-IOV: Got new " - "LD/VF affiliation for passive path " + thisVf = new_affiliation_111->thisVf; + for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) + if (instance->vf_affiliation_111->map[ld].policy[thisVf] != + new_affiliation_111->map[ld].policy[thisVf]) { + printk(KERN_WARNING "megasas: SR-IOV: " + "Got new LD/VF affiliation " "for scsi%d.\n", - instance->host->host_no); - retval = 1; - goto out; - } - thisVf = new_affiliation_111->thisVf; - for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) - if 
(instance->vf_affiliation_111->map[ld].policy[thisVf] != new_affiliation_111->map[ld].policy[thisVf]) { - printk(KERN_WARNING "megasas: SR-IOV: " - "Got new LD/VF affiliation " - "for scsi%d.\n", - instance->host->host_no); - memcpy(instance->vf_affiliation_111, - new_affiliation_111, - sizeof(struct MR_LD_VF_AFFILIATION_111)); - retval = 1; - goto out; - } - } else { - if (!new_affiliation->ldCount) { - printk(KERN_WARNING "megasas: SR-IOV: Got new " - "LD/VF affiliation for passive " - "path for scsi%d.\n", instance->host->host_no); + memcpy(instance->vf_affiliation_111, + new_affiliation_111, + sizeof(struct MR_LD_VF_AFFILIATION_111)); retval = 1; goto out; } - newmap = new_affiliation->map; - savedmap = instance->vf_affiliation->map; - thisVf = new_affiliation->thisVf; - for (ld = 0 ; ld < new_affiliation->ldCount; ld++) { - if (savedmap->policy[thisVf] != - newmap->policy[thisVf]) { - printk(KERN_WARNING "megasas: SR-IOV: " - "Got new LD/VF affiliation " - "for scsi%d.\n", - instance->host->host_no); - memcpy(instance->vf_affiliation, - new_affiliation, - new_affiliation->size); - retval = 1; - goto out; + } +out: + if (new_affiliation_111) { + pci_free_consistent(instance->pdev, + sizeof(struct MR_LD_VF_AFFILIATION_111), + new_affiliation_111, + new_affiliation_111_h); + } + megasas_return_cmd(instance, cmd); + + return retval; +} + +static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, + int initial) +{ + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; + struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; + dma_addr_t new_affiliation_h; + int i, j, retval = 0, found = 0, doscan = 0; + u8 thisVf; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation12: " + "Failed to get cmd for scsi%d.\n", + instance->host->host_no); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + if (!instance->vf_affiliation) { + printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF " + "affiliation for scsi%d.\n", instance->host->host_no); + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + + if (initial) + memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION)); + else { + new_affiliation = + pci_alloc_consistent(instance->pdev, + (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION), + &new_affiliation_h); + if (!new_affiliation) { + printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate " + "memory for new affiliation for scsi%d.\n", + instance->host->host_no); + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION)); + } + + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_BOTH; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION); + dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS; + + if (initial) + dcmd->sgl.sge32[0].phys_addr = instance->vf_affiliation_h; + else + dcmd->sgl.sge32[0].phys_addr = new_affiliation_h; + + dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION); + + printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for " + "scsi%d\n", instance->host->host_no); + + megasas_issue_blocked_cmd(instance, cmd, 0); + + if (dcmd->cmd_status) { + 
printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD" + " failed with status 0x%x for scsi%d.\n", + dcmd->cmd_status, instance->host->host_no); + retval = 1; /* Do a scan if we couldn't get affiliation */ + goto out; + } + + if (!initial) { + if (!new_affiliation->ldCount) { + printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF " + "affiliation for passive path for scsi%d.\n", + instance->host->host_no); + retval = 1; + goto out; + } + newmap = new_affiliation->map; + savedmap = instance->vf_affiliation->map; + thisVf = new_affiliation->thisVf; + for (i = 0 ; i < new_affiliation->ldCount; i++) { + found = 0; + for (j = 0; j < instance->vf_affiliation->ldCount; + j++) { + if (newmap->ref.targetId == + savedmap->ref.targetId) { + found = 1; + if (newmap->policy[thisVf] != + savedmap->policy[thisVf]) { + doscan = 1; + goto out; + } } savedmap = (struct MR_LD_VF_MAP *) ((unsigned char *)savedmap + savedmap->size); + } + if (!found && newmap->policy[thisVf] != + MR_LD_ACCESS_HIDDEN) { + doscan = 1; + goto out; + } + newmap = (struct MR_LD_VF_MAP *) + ((unsigned char *)newmap + newmap->size); + } + + newmap = new_affiliation->map; + savedmap = instance->vf_affiliation->map; + + for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) { + found = 0; + for (j = 0 ; j < new_affiliation->ldCount; j++) { + if (savedmap->ref.targetId == + newmap->ref.targetId) { + found = 1; + if (savedmap->policy[thisVf] != + newmap->policy[thisVf]) { + doscan = 1; + goto out; + } + } newmap = (struct MR_LD_VF_MAP *) ((unsigned char *)newmap + newmap->size); } + if (!found && savedmap->policy[thisVf] != + MR_LD_ACCESS_HIDDEN) { + doscan = 1; + goto out; + } + savedmap = (struct MR_LD_VF_MAP *) + ((unsigned char *)savedmap + + savedmap->size); } } out: - if (new_affiliation) { - if (instance->PlasmaFW111) - pci_free_consistent(instance->pdev, - sizeof(struct MR_LD_VF_AFFILIATION_111), - new_affiliation_111, - new_affiliation_111_h); - else - pci_free_consistent(instance->pdev, - (MAX_LOGICAL_DRIVES + 1) * - sizeof(struct MR_LD_VF_AFFILIATION), - new_affiliation, new_affiliation_h); + if (doscan) { + printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF " + "affiliation for scsi%d.\n", instance->host->host_no); + memcpy(instance->vf_affiliation, new_affiliation, + new_affiliation->size); + retval = 1; } + + if (new_affiliation) + pci_free_consistent(instance->pdev, + (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION), + new_affiliation, new_affiliation_h); megasas_return_cmd(instance, cmd); return retval; } +/* This function will get the current SR-IOV LD/VF affiliation */ +static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, + int initial) +{ + int retval; + + if (instance->PlasmaFW111) + retval = megasas_get_ld_vf_affiliation_111(instance, initial); + else + retval = megasas_get_ld_vf_affiliation_12(instance, initial); + return retval; +} + /* This function will tell FW to start the SR-IOV heartbeat */ int megasas_sriov_start_heartbeat(struct megasas_instance *instance, int initial) -- cgit From 9ea81f8169bef693a0136f3ab1110b00dc7c6f84 Mon Sep 17 00:00:00 2001 From: Adam Radford Date: Wed, 9 Jul 2014 15:17:57 -0700 Subject: megaraid_sas: Add missing initial call to megasas_get_ld_vf_affiliation(). The following patch for megaraid_sas adds a missing initial call to megasas_get_ld_vf_affiliation() at the end of megasas_probe_one(). Signed-off-by: Adam Radford Reviewed-by: Martin K. 
Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas_base.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 24e4116d2f4b..3970002ba4e5 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4984,6 +4984,10 @@ retry_irq_register: goto fail_start_aen; } + /* Get current SR-IOV LD/VF affiliation */ + if (instance->requestorId) + megasas_get_ld_vf_affiliation(instance, 1); + return 0; fail_start_aen: -- cgit From c21bb25e8a8653c6d5ba1bec5c6ef4a20ee35347 Mon Sep 17 00:00:00 2001 From: Adam Radford Date: Wed, 9 Jul 2014 15:17:55 -0700 Subject: megaraid_sas: Remove unused variables in megasas_instance The following patch for megaraid_sas removes some unused variables from the megasas_instance structure. Signed-off-by: Adam Radford Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 2e4b808b5686..2e2fcb2c620c 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1633,8 +1633,6 @@ struct megasas_instance { struct timer_list sriov_heartbeat_timer; char skip_heartbeat_timer_del; u8 requestorId; - u64 initiator_sas_address; - u64 ld_sas_address[64]; char PlasmaFW111; char mpio; int throttlequeuedepth; -- cgit From a2fbcbc3f0aa3bea3bf5c86e41f9c543c8de9e75 Mon Sep 17 00:00:00 2001 From: Adam Radford Date: Wed, 9 Jul 2014 15:17:54 -0700 Subject: megaraid_sas: Fix reset_mutex leak The following patch for megaraid_sas fixes a reset_mutex leak in megasas_reset_fusion(). Signed-off-by: Adam Radford Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 3ed03dfab76c..57b47fe69072 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2355,6 +2355,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) printk(KERN_WARNING "megaraid_sas: Hardware critical error, " "returning FAILED for scsi%d.\n", instance->host->host_no); + mutex_unlock(&instance->reset_mutex); return FAILED; } -- cgit From 5e8d90070b87df4237d3dc88ffa652f30badce85 Mon Sep 17 00:00:00 2001 From: Adam Radford Date: Wed, 9 Jul 2014 15:17:58 -0700 Subject: megaraid_sas: Version and Changelog update The following patch for megaraid_sas updates the driver version and Documentation/scsi/ChangeLog.megaraid_sas. Signed-off-by: Adam Radford Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- Documentation/scsi/ChangeLog.megaraid_sas | 14 ++++++++++++++ drivers/scsi/megaraid/megaraid_sas.h | 6 +++--- drivers/scsi/megaraid/megaraid_sas_base.c | 2 +- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas index 91ba58ef02d7..18b570990040 100644 --- a/Documentation/scsi/ChangeLog.megaraid_sas +++ b/Documentation/scsi/ChangeLog.megaraid_sas @@ -1,3 +1,17 @@ +Release Date : Thu. Jun 19, 2014 17:00:00 PST 2014 - + (emaild-id:megaraidlinux@lsi.com) + Adam Radford + Kashyap Desai + Sumit Saxena + Uday Lingala +Current Version : 06.803.02.00-rc1 +Old Version : 06.803.01.00-rc1 + 1. 
Fix reset_mutex leak in megasas_reset_fusion(). + 2. Remove unused variables in megasas_instance. + 3. Fix LD/VF affiliation parsing. + 4. Add missing initial call to megasas_get_ld_vf_affiliation(). + 5. Version and Changelog update. +------------------------------------------------------------------------------- Release Date : Mon. Mar 10, 2014 17:00:00 PST 2014 - (emaild-id:megaraidlinux@lsi.com) Adam Radford diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 2e2fcb2c620c..bc7adcf537f2 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -33,9 +33,9 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "06.803.01.00-rc1" -#define MEGASAS_RELDATE "Mar. 10, 2014" -#define MEGASAS_EXT_VERSION "Mon. Mar. 10 17:00:00 PDT 2014" +#define MEGASAS_VERSION "06.803.02.00-rc1" +#define MEGASAS_RELDATE "Jun. 19, 2014" +#define MEGASAS_EXT_VERSION "Thu. Jun. 19 17:00:00 PDT 2014" /* * Device IDs diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 3970002ba4e5..ff44f2ce4153 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -18,7 +18,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * FILE: megaraid_sas_base.c - * Version : 06.803.01.00-rc1 + * Version : 06.803.02.00-rc1 * * Authors: LSI Corporation * Sreenivas Bagalkote -- cgit From e149fc13dbc1c113e667e7f5bd45288853bdf6bb Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Mon, 18 Aug 2014 08:01:48 +0200 Subject: be2iscsi: Use pci_enable_msix_range() As result of deprecation of MSI-X/MSI enablement functions pci_enable_msix() and pci_enable_msi_block() all drivers using these two interfaces need to be updated to use the new pci_enable_msi_range() and pci_enable_msix_range() interfaces. 
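As a minimal illustration of the conversion (hypothetical code, not lifted from the be2iscsi change that follows): pci_enable_msix() returned 0 on success, a negative errno on failure, or a positive count of vectors that could still be allocated, which pushed callers into a retry-with-the-hint loop; pci_enable_msix_range() negotiates inside a [minvec, maxvec] window in a single call and returns either the number of vectors actually allocated or a negative errno.

    #include <linux/pci.h>

    /*
     * Hypothetical helper: try to get up to @nvec MSI-X vectors, accepting
     * as few as one.  With the old interface this needed a call to
     * pci_enable_msix(), a check for a positive "only this many available"
     * return, and a second call using that hint.
     */
    static int example_enable_msix(struct pci_dev *pdev,
                                   struct msix_entry *entries, int nvec)
    {
            int got;

            /* Returns the allocated count in [1, nvec], or a negative errno. */
            got = pci_enable_msix_range(pdev, entries, 1, nvec);
            if (got < 0)
                    return got;     /* caller falls back to MSI or INTx */

            return got;             /* number of vectors actually granted */
    }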
Signed-off-by: Alexander Gordeev Reviewed-by: Tomas Henzl Acked-by: Jayamohan Kallickal Signed-off-by: Christoph Hellwig --- drivers/scsi/be2iscsi/be_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index c44dce2aa1f2..db2bd4d5311a 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -5293,9 +5293,9 @@ static void beiscsi_msix_enable(struct beiscsi_hba *phba) for (i = 0; i <= phba->num_cpus; i++) phba->msix_entries[i].entry = i; - status = pci_enable_msix(phba->pcidev, phba->msix_entries, - (phba->num_cpus + 1)); - if (!status) + status = pci_enable_msix_range(phba->pcidev, phba->msix_entries, + phba->num_cpus + 1, phba->num_cpus + 1); + if (status > 0) phba->msix_enabled = true; return; -- cgit From c6b9bad280674a51f42929fc1fe6964ac3528dc8 Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Mon, 18 Aug 2014 08:01:49 +0200 Subject: csiostor: Remove superfluous call to pci_disable_msix() There is no need to call pci_disable_msix() in case the previous call to pci_enable_msix() failed Signed-off-by: Alexander Gordeev Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/csiostor/csio_isr.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c index 7ee9777ae2c5..91ba91dac675 100644 --- a/drivers/scsi/csiostor/csio_isr.c +++ b/drivers/scsi/csiostor/csio_isr.c @@ -529,10 +529,8 @@ csio_enable_msix(struct csio_hw *hw) csio_reduce_sqsets(hw, cnt - extra); } } else { - if (rv > 0) { - pci_disable_msix(hw->pdev); + if (rv > 0) csio_info(hw, "Not using MSI-X, remainder:%d\n", rv); - } kfree(entries); return -ENOMEM; -- cgit From 6b73352134d9cd6ff1af1962799b738b4de604be Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Mon, 18 Aug 2014 08:01:50 +0200 Subject: csiostor: Use pci_enable_msix_range() instead of pci_enable_msix() As result of deprecation of MSI-X/MSI enablement functions pci_enable_msix() and pci_enable_msi_block() all drivers using these two interfaces need to be updated to use the new pci_enable_msi_range() or pci_enable_msi_exact() and pci_enable_msix_range() or pci_enable_msix_exact() interfaces. 
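One point worth keeping in mind when reading the csiostor conversion below: a successful pci_enable_msix_range() call may grant fewer vectors than maxvec, and the driver is expected to size its per-vector resources to whatever was granted rather than treating a short allocation as an error. A rough sketch of that pattern (hypothetical variable names; shrink_queue_sets() is illustrative only, standing in for whatever the real driver does):

    int min_needed = 2, wanted = 8;         /* illustrative numbers */
    int got;

    got = pci_enable_msix_range(pdev, entries, min_needed, wanted);
    if (got < 0)
            return got;                     /* not even min_needed vectors available */
    if (got < wanted)
            shrink_queue_sets(hw, got);     /* hypothetical: trim per-vector queues */
    /* continue initialization with "got" vectors */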
Signed-off-by: Alexander Gordeev Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/csiostor/csio_hw.h | 2 +- drivers/scsi/csiostor/csio_isr.c | 22 +++++++++------------- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h index 49b1daa4476e..5db2d85195b1 100644 --- a/drivers/scsi/csiostor/csio_hw.h +++ b/drivers/scsi/csiostor/csio_hw.h @@ -94,7 +94,7 @@ enum { }; struct csio_msix_entries { - unsigned short vector; /* Vector assigned by pci_enable_msix */ + unsigned short vector; /* Assigned MSI-X vector */ void *dev_id; /* Priv object associated w/ this msix*/ char desc[24]; /* Description of this vector */ }; diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c index 91ba91dac675..a8c748a35f9c 100644 --- a/drivers/scsi/csiostor/csio_isr.c +++ b/drivers/scsi/csiostor/csio_isr.c @@ -499,7 +499,7 @@ csio_reduce_sqsets(struct csio_hw *hw, int cnt) static int csio_enable_msix(struct csio_hw *hw) { - int rv, i, j, k, n, min, cnt; + int i, j, k, n, min, cnt; struct csio_msix_entries *entryp; struct msix_entry *entries; int extra = CSIO_EXTRA_VECS; @@ -521,19 +521,15 @@ csio_enable_msix(struct csio_hw *hw) csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt); - while ((rv = pci_enable_msix(hw->pdev, entries, cnt)) >= min) - cnt = rv; - if (!rv) { - if (cnt < (hw->num_sqsets + extra)) { - csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra); - csio_reduce_sqsets(hw, cnt - extra); - } - } else { - if (rv > 0) - csio_info(hw, "Not using MSI-X, remainder:%d\n", rv); - + cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt); + if (cnt < 0) { kfree(entries); - return -ENOMEM; + return cnt; + } + + if (cnt < (hw->num_sqsets + extra)) { + csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra); + csio_reduce_sqsets(hw, cnt - extra); } /* Save off vectors */ -- cgit From 182801ff266e61d2c4b18aab20f0ffed0f36b873 Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Mon, 18 Aug 2014 08:01:52 +0200 Subject: pmcraid: Get rid of a redundant assignment Signed-off-by: Alexander Gordeev Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/pmcraid.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 6f3275d020a0..6e575edf29f4 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -4746,7 +4746,6 @@ pmcraid_isr_legacy: pinstance->hrrq_vector[0].drv_inst = pinstance; pinstance->hrrq_vector[0].vector = pdev->irq; pinstance->num_hrrq = 1; - rc = 0; rc = request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED, PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]); -- cgit From c01a8bc084b9bec58a9ee3c7a95fb4266328c4b8 Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Mon, 18 Aug 2014 08:01:53 +0200 Subject: pmcraid: Use pci_enable_msix_range() instead of pci_enable_msix() As result of deprecation of MSI-X/MSI enablement functions pci_enable_msix() and pci_enable_msi_block() all drivers using these two interfaces need to be updated to use the new pci_enable_msi_range() or pci_enable_msi_exact() and pci_enable_msix_range() or pci_enable_msix_exact() interfaces. 
Signed-off-by: Alexander Gordeev Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/pmcraid.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 6e575edf29f4..bcb64eb1387f 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -4698,19 +4698,10 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance) for (i = 0; i < PMCRAID_NUM_MSIX_VECTORS; i++) entries[i].entry = i; - rc = pci_enable_msix(pdev, entries, num_hrrq); - if (rc < 0) + num_hrrq = pci_enable_msix_range(pdev, entries, 1, num_hrrq); + if (num_hrrq < 0) goto pmcraid_isr_legacy; - /* Check how many MSIX vectors are allocated and register - * msi-x handlers for each of them giving appropriate buffer - */ - if (rc > 0) { - num_hrrq = rc; - if (pci_enable_msix(pdev, entries, num_hrrq)) - goto pmcraid_isr_legacy; - } - for (i = 0; i < num_hrrq; i++) { pinstance->hrrq_vector[i].hrrq_id = i; pinstance->hrrq_vector[i].drv_inst = pinstance; -- cgit From db7157d4cfce6edf052452fb1d327d4d11b67f4c Mon Sep 17 00:00:00 2001 From: Joe Lawrence Date: Tue, 26 Aug 2014 17:10:41 -0400 Subject: qla2xxx: Fix shost use-after-free on device removal Once calling scsi_host_put, be careful to not access qla_hw_data through the Scsi_Host private data (ie, scsi_qla_host base_vha). Fixes: fe1b806f4f71 ("qla2xxx: Refactor shutdown code so some functionality can be reused") Cc: stable@vger.kernel.org # 3.14, 3.15, 3.16 Signed-off-by: Joe Lawrence Acked-by: Chad Dupuis Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_os.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index be9698d920c2..8252c0e6682c 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3119,10 +3119,8 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha) } static void -qla2x00_clear_drv_active(scsi_qla_host_t *vha) +qla2x00_clear_drv_active(struct qla_hw_data *ha) { - struct qla_hw_data *ha = vha->hw; - if (IS_QLA8044(ha)) { qla8044_idc_lock(ha); qla8044_clear_drv_active(ha); @@ -3193,7 +3191,7 @@ qla2x00_remove_one(struct pci_dev *pdev) scsi_host_put(base_vha->host); - qla2x00_clear_drv_active(base_vha); + qla2x00_clear_drv_active(ha); qla2x00_unmap_iobases(ha); -- cgit From 1a2fbf185cce45b80cfdb441fb6651c07915ddb1 Mon Sep 17 00:00:00 2001 From: Joe Lawrence Date: Tue, 26 Aug 2014 17:11:18 -0400 Subject: qla2xxx: Use qla2x00_clear_drv_active on probe failure Take advantage of commit fe1b806f4f71 ("qla2xxx: Refactor shutdown code so some functionality can be reused") to remove an inlined copy of qla2x00_clear_drv_active in the driver's probe hardware error path. 
Signed-off-by: Joe Lawrence Acked-by: Chad Dupuis Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_os.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 8252c0e6682c..53449d7eab91 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -238,6 +238,7 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *); static int qla2x00_change_queue_depth(struct scsi_device *, int, int); static int qla2x00_change_queue_type(struct scsi_device *, int); +static void qla2x00_clear_drv_active(struct qla_hw_data *); static void qla2x00_free_device(scsi_qla_host_t *); struct scsi_host_template qla2xxx_driver_template = { @@ -2954,16 +2955,8 @@ probe_failed: scsi_host_put(base_vha->host); probe_hw_failed: - if (IS_QLA82XX(ha)) { - qla82xx_idc_lock(ha); - qla82xx_clear_drv_active(ha); - qla82xx_idc_unlock(ha); - } - if (IS_QLA8044(ha)) { - qla8044_idc_lock(ha); - qla8044_clear_drv_active(ha); - qla8044_idc_unlock(ha); - } + qla2x00_clear_drv_active(ha); + iospace_config_failed: if (IS_P3P_TYPE(ha)) { if (!ha->nx_pcibase) -- cgit From c821e0d5b20006acdaca7aa378097a084986e37b Mon Sep 17 00:00:00 2001 From: Joe Lawrence Date: Tue, 26 Aug 2014 17:11:41 -0400 Subject: qla2xxx: Collect PCI register checks and board_disable scheduling Add an uint16_t variant of qla2x00_check_reg_for_disconnect and use these routines to check and schedule a PCI-disconnected board from a centralized place. Signed-off-by: Joe Lawrence Acked-by: Chad Dupuis Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_gbl.h | 3 ++- drivers/scsi/qla2xxx/qla_isr.c | 28 +++++++++++++--------------- drivers/scsi/qla2xxx/qla_mr.c | 2 +- drivers/scsi/qla2xxx/qla_nx.c | 6 +++--- drivers/scsi/qla2xxx/qla_os.c | 8 +------- 5 files changed, 20 insertions(+), 27 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index d646540db3ac..43ef0db9654e 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -475,7 +475,8 @@ extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, uint32_t); extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t); -bool qla2x00_check_reg_for_disconnect(scsi_qla_host_t *, uint32_t); +bool qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *, uint32_t); +bool qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *, uint16_t); extern int qla2x00_beacon_on(struct scsi_qla_host *); extern int qla2x00_beacon_off(struct scsi_qla_host *); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index bea0e7458ee1..016ebed880f5 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -56,16 +56,8 @@ qla2100_intr_handler(int irq, void *dev_id) vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { hccr = RD_REG_WORD(®->hccr); - /* Check for PCI disconnection */ - if (hccr == 0xffff) { - /* - * Schedule this on the default system workqueue so that - * all the adapter workqueues and the DPC thread can be - * shutdown cleanly. 
- */ - schedule_work(&ha->board_disable); + if (qla2x00_check_reg16_for_disconnect(vha, hccr)) break; - } if (hccr & HCCR_RISC_PAUSE) { if (pci_channel_offline(ha->pdev)) break; @@ -121,7 +113,7 @@ qla2100_intr_handler(int irq, void *dev_id) } bool -qla2x00_check_reg_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) +qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) { /* Check for PCI disconnection */ if (reg == 0xffffffff) { @@ -136,6 +128,12 @@ qla2x00_check_reg_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) return false; } +bool +qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg) +{ + return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg); +} + /** * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. * @irq: @@ -174,7 +172,7 @@ qla2300_intr_handler(int irq, void *dev_id) vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(®->u.isp2300.host_status); - if (qla2x00_check_reg_for_disconnect(vha, stat)) + if (qla2x00_check_reg32_for_disconnect(vha, stat)) break; if (stat & HSR_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) @@ -2631,7 +2629,7 @@ qla24xx_intr_handler(int irq, void *dev_id) vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(®->host_status); - if (qla2x00_check_reg_for_disconnect(vha, stat)) + if (qla2x00_check_reg32_for_disconnect(vha, stat)) break; if (stat & HSRX_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) @@ -2721,7 +2719,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) * we process the response queue. */ stat = RD_REG_DWORD(®->host_status); - if (qla2x00_check_reg_for_disconnect(vha, stat)) + if (qla2x00_check_reg32_for_disconnect(vha, stat)) goto out; qla24xx_process_response_queue(vha, rsp); if (!ha->flags.disable_msix_handshake) { @@ -2761,7 +2759,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) hccr = RD_REG_DWORD_RELAXED(®->hccr); spin_unlock_irqrestore(&ha->hardware_lock, flags); } - if (qla2x00_check_reg_for_disconnect(vha, hccr)) + if (qla2x00_check_reg32_for_disconnect(vha, hccr)) goto out; queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); @@ -2796,7 +2794,7 @@ qla24xx_msix_default(int irq, void *dev_id) vha = pci_get_drvdata(ha->pdev); do { stat = RD_REG_DWORD(®->host_status); - if (qla2x00_check_reg_for_disconnect(vha, stat)) + if (qla2x00_check_reg32_for_disconnect(vha, stat)) break; if (stat & HSRX_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 4775baa8b6a0..8ecf6decea67 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -2924,7 +2924,7 @@ qlafx00_intr_handler(int irq, void *dev_id) vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; clr_intr = 0) { stat = QLAFX00_RD_INTR_REG(ha); - if (qla2x00_check_reg_for_disconnect(vha, stat)) + if (qla2x00_check_reg32_for_disconnect(vha, stat)) break; intr_stat = stat & QLAFX00_HST_INT_STS_BITS; if (!intr_stat) diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 58f3c912d96e..25626006baff 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -2123,7 +2123,7 @@ qla82xx_msix_default(int irq, void *dev_id) vha = pci_get_drvdata(ha->pdev); do { host_int = RD_REG_DWORD(®->host_int); - if (qla2x00_check_reg_for_disconnect(vha, host_int)) + if (qla2x00_check_reg32_for_disconnect(vha, host_int)) break; if (host_int) { stat = RD_REG_DWORD(®->host_status); @@ -2184,7 +2184,7 @@ 
qla82xx_msix_rsp_q(int irq, void *dev_id) spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); host_int = RD_REG_DWORD(®->host_int); - if (qla2x00_check_reg_for_disconnect(vha, host_int)) + if (qla2x00_check_reg32_for_disconnect(vha, host_int)) goto out; qla24xx_process_response_queue(vha, rsp); WRT_REG_DWORD(®->host_int, 0); @@ -2219,7 +2219,7 @@ qla82xx_poll(int irq, void *dev_id) vha = pci_get_drvdata(ha->pdev); host_int = RD_REG_DWORD(®->host_int); - if (qla2x00_check_reg_for_disconnect(vha, host_int)) + if (qla2x00_check_reg32_for_disconnect(vha, host_int)) goto out; if (host_int) { stat = RD_REG_DWORD(®->host_status); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 53449d7eab91..3bfa89d1da75 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -5183,13 +5183,7 @@ qla2x00_timer(scsi_qla_host_t *vha) */ if (!pci_channel_offline(ha->pdev)) { pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); - if (w == 0xffff) - /* - * Schedule this on the default system workqueue so that - * all the adapter workqueues and the DPC thread can be - * shutdown cleanly. - */ - schedule_work(&ha->board_disable); + qla2x00_check_reg16_for_disconnect(vha, w); } /* Make sure qla82xx_watchdog is run only for physical port */ -- cgit From 232792b6b43b1420324e432a0498602b9c8d5a8c Mon Sep 17 00:00:00 2001 From: Joe Lawrence Date: Tue, 26 Aug 2014 17:12:01 -0400 Subject: qla2xxx: Schedule board_disable only once There are various callers of qla2x00_check_reg{32,16}_for_disconnect that may schedule board removal on PCI-disconnect. Test-and-set a dedicated flag before scheduling board_disable so it is invoked only once. Signed-off-by: Joe Lawrence Acked-by: Chad Dupuis Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_def.h | 3 +++ drivers/scsi/qla2xxx/qla_isr.c | 14 ++++++++------ 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index b64399153135..4267ec6f3258 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3402,6 +3402,9 @@ typedef struct scsi_qla_host { #define FX00_CRITEMP_RECOVERY 25 #define FX00_HOST_INFO_RESEND 26 + unsigned long pci_flags; +#define PFLG_DISCONNECTED 0 /* PCI device removed */ + uint32_t device_flags; #define SWITCH_FOUND BIT_0 #define DFLG_NO_CABLE BIT_1 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 016ebed880f5..fd75b9139693 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -117,12 +117,14 @@ qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) { /* Check for PCI disconnection */ if (reg == 0xffffffff) { - /* - * Schedule this on the default system workqueue so that all the - * adapter workqueues and the DPC thread can be shutdown - * cleanly. - */ - schedule_work(&vha->hw->board_disable); + if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags)) { + /* + * Schedule this (only once) on the default system + * workqueue so that all the adapter workqueues and the + * DPC thread can be shutdown cleanly. 
+ */ + schedule_work(&vha->hw->board_disable); + } return true; } else return false; -- cgit From beb9e315e6e0d8d1d7d3a79d2e5d4664aa8f8796 Mon Sep 17 00:00:00 2001 From: Joe Lawrence Date: Tue, 26 Aug 2014 17:12:14 -0400 Subject: qla2xxx: Prevent removal and board_disable race Introduce mutual exclusion between the qla2xxx_remove_one PCI driver callback and qla2x00_disable_board_on_pci_error, which is scheduled as board_disable work by qla2x00_check_reg{32,16}_for_disconnect: * Leave the driver-specific data attached to the underlying PCI device intact in qla2x00_disable_board_on_pci_error, so that qla2x00_remove_one has enough breadcrumbs to determine that any board_disable work has been completed. * In qla2xxx_remove_one, set a bit to prevent any subsequent board_disable work from scheduling, then cancel and wait until pending work has completed. * Reuse the PCI device enable count check in qla2x00_remove_one to determine if board_disable has occured. The original purpose of this check was unnecessary since the driver remove function wasn't called when the probe fails. Signed-off-by: Joe Lawrence Acked-by: Chad Dupuis Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_def.h | 1 + drivers/scsi/qla2xxx/qla_isr.c | 3 ++- drivers/scsi/qla2xxx/qla_os.c | 31 +++++++++++++++++++------------ 3 files changed, 22 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 4267ec6f3258..13988180a48c 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3404,6 +3404,7 @@ typedef struct scsi_qla_host { unsigned long pci_flags; #define PFLG_DISCONNECTED 0 /* PCI device removed */ +#define PFLG_DRIVER_REMOVING 1 /* PCI driver .remove */ uint32_t device_flags; #define SWITCH_FOUND BIT_0 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index fd75b9139693..341c64daa959 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -117,7 +117,8 @@ qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) { /* Check for PCI disconnection */ if (reg == 0xffffffff) { - if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags)) { + if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) && + !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) { /* * Schedule this (only once) on the default system * workqueue so that all the adapter workqueues and the diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 3bfa89d1da75..84d4df6e6221 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3131,15 +3131,25 @@ qla2x00_remove_one(struct pci_dev *pdev) scsi_qla_host_t *base_vha; struct qla_hw_data *ha; + base_vha = pci_get_drvdata(pdev); + ha = base_vha->hw; + + /* Indicate device removal to prevent future board_disable and wait + * until any pending board_disable has completed. */ + set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags); + cancel_work_sync(&ha->board_disable); + /* - * If the PCI device is disabled that means that probe failed and any - * resources should be have cleaned up on probe exit. + * If the PCI device is disabled then there was a PCI-disconnect and + * qla2x00_disable_board_on_pci_error has taken care of most of the + * resources. 
*/ - if (!atomic_read(&pdev->enable_cnt)) + if (!atomic_read(&pdev->enable_cnt)) { + scsi_host_put(base_vha->host); + kfree(ha); + pci_set_drvdata(pdev, NULL); return; - - base_vha = pci_get_drvdata(pdev); - ha = base_vha->hw; + } qla2x00_wait_for_hba_ready(base_vha); @@ -4799,18 +4809,15 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) qla82xx_md_free(base_vha); qla2x00_free_queues(ha); - scsi_host_put(base_vha->host); - qla2x00_unmap_iobases(ha); pci_release_selected_regions(ha->pdev, ha->bars); - kfree(ha); - ha = NULL; - pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); + /* + * Let qla2x00_remove_one cleanup qla_hw_data on device removal. + */ } /************************************************************************** -- cgit From 6b3839790b16adffbbe2f5967e149562a5a603e8 Mon Sep 17 00:00:00 2001 From: Joe Lawrence Date: Tue, 26 Aug 2014 17:12:29 -0400 Subject: qla2xxx: Prevent probe and board_disable race The PCI register read checking introduced in commit fe1b806f4f71 ("qla2xxx: Disable adapter when we encounter a PCI disconnect") is active during driver probe. Hold off scheduling any board removal until the driver probe has completed. This ensures that the the board_disable work structure is initialized and more importantly, avoids racing qla2x00_probe_one. Signed-off-by: Joe Lawrence Acked-by: Chad Dupuis Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_def.h | 1 + drivers/scsi/qla2xxx/qla_isr.c | 3 ++- drivers/scsi/qla2xxx/qla_os.c | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 13988180a48c..de5a9c471cf9 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3405,6 +3405,7 @@ typedef struct scsi_qla_host { unsigned long pci_flags; #define PFLG_DISCONNECTED 0 /* PCI device removed */ #define PFLG_DRIVER_REMOVING 1 /* PCI driver .remove */ +#define PFLG_DRIVER_PROBING 2 /* PCI driver .probe */ uint32_t device_flags; #define SWITCH_FOUND BIT_0 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 341c64daa959..a0992bfab7bf 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -118,7 +118,8 @@ qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) /* Check for PCI disconnection */ if (reg == 0xffffffff) { if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) && - !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) { + !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) && + !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) { /* * Schedule this (only once) on the default system * workqueue so that all the adapter workqueues and the diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 84d4df6e6221..f93785957b72 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2632,6 +2632,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) } pci_set_drvdata(pdev, base_vha); + set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); host = base_vha->host; base_vha->req = req; @@ -2928,6 +2929,7 @@ skip_dpc: qlt_add_target(ha, base_vha); + clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); return 0; probe_init_failed: -- cgit From 86001f248e943b7b22c22b50151ffaee9447df2d Mon Sep 17 00:00:00 2001 From: Hiral Shah Date: Fri, 2 May 2014 17:46:31 -0700 Subject: fnic: assign FIP_ALL_FCF_MACS to fcoe_all_fcfs 1) Assgning FIP_ALL_FCF_MACS to fcoe_all_fcfs allows 
VLAN request to be sent to correct Mac address for VLAN Discovery otherwise VLAN request will be sent to invalid address hence FLOGI never happens. 2) Simplify the copy_and_format_trace_data code and log the correct Link event for fnic control path tracing in case of link status UP->UP. 3) Increment Fnic driver version Signed-off-by: Hiral Shah Signed-off-by: Sesidhar Baddela Signed-off-by: Christoph Hellwig --- drivers/scsi/fnic/fnic.h | 2 +- drivers/scsi/fnic/fnic_fcs.c | 5 +++-- drivers/scsi/fnic/fnic_trace.c | 5 ++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 1d3521e13d77..bf8d34c26f13 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -39,7 +39,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.6.0.10" +#define DRV_VERSION "1.6.0.11" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 1b948f633fc5..f3984b48f8e9 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -35,7 +35,7 @@ #include "cq_enet_desc.h" #include "cq_exch_desc.h" -static u8 fcoe_all_fcfs[ETH_ALEN]; +static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; struct workqueue_struct *fnic_fip_queue; struct workqueue_struct *fnic_event_queue; @@ -101,13 +101,14 @@ void fnic_handle_link(struct work_struct *work) FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); fcoe_ctlr_link_up(&fnic->ctlr); - } else + } else { /* UP -> UP */ spin_unlock_irqrestore(&fnic->fnic_lock, flags); fnic_fc_trace_set_data( fnic->lport->host->host_no, FNIC_FC_LE, "Link Status: UP_UP", strlen("Link Status: UP_UP")); + } } } else if (fnic->link_status) { /* DOWN -> UP */ diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index c77285926827..121a5d7e98c4 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -743,7 +743,7 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata, fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t"; len += snprintf(fnic_dbgfs_prt->buffer + len, - (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len, + max_size - len, fmt, tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900, tm.tm_hour, tm.tm_min, tm.tm_sec, @@ -767,8 +767,7 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata, j == ethhdr_len + fcoehdr_len + fchdr_len || (i > 3 && j%fchdr_len == 0)) { len += snprintf(fnic_dbgfs_prt->buffer - + len, (fnic_fc_trace_max_pages - * PAGE_SIZE * 3) - len, + + len, max_size - len, "\n\t\t\t\t\t\t\t\t"); i++; } -- cgit From 5d8b81676829c14af98aec2233151aad02f5fe9d Mon Sep 17 00:00:00 2001 From: Rashika Kheria Date: Wed, 3 Sep 2014 12:55:04 -0400 Subject: lpfc: mark functions as static in lpfc/lpfc_sli.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [PATCH 22/55] scsi: Mark functions as static in lpfc/lpfc_sli.c Mark functions as static in lpfc/lpfc_sli.c because they are not used outside this file. 
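As a minimal illustration of the warning class being silenced (hypothetical file and function names, not taken from the driver): with -Wmissing-prototypes, gcc warns for every external-linkage function that is defined without a previous prototype declaration, while a static definition carries no such expectation:

	/* example.c, built with -Wmissing-prototypes */
	int ext_helper(int x)		/* warning: no previous prototype for 'ext_helper' */
	{
		return x + 1;
	}

	static int local_helper(int x)	/* internal linkage: no prototype expected, no warning */
	{
		return x + 1;
	}

Marking file-local functions static therefore documents the intended scope and removes the warning without any behavioural change.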
This eliminates the following warnings in lpfc/lpfc_sli.c: drivers/scsi/lpfc/lpfc_sli.c:13867:1: warning: no previous prototype for ‘lpfc_sli4_alloc_xri’ [-Wmissing-prototypes] drivers/scsi/lpfc/lpfc_sli.c:13897:1: warning: no previous prototype for ‘__lpfc_sli4_free_xri’ [-Wmissing-prototypes] drivers/scsi/lpfc/lpfc_sli.c:14317:1: warning: no previous prototype for ‘lpfc_update_rcv_time_stamp’ [-Wmissing-prototypes] drivers/scsi/lpfc/lpfc_sli.c:14786:1: warning: no previous prototype for ‘lpfc_sli4_handle_unsol_abort’ [-Wmissing-prototypes] drivers/scsi/lpfc/lpfc_sli.c:15331:1: warning: no previous prototype for ‘__lpfc_sli4_free_rpi’ [-Wmissing-prototypes] drivers/scsi/lpfc/lpfc_sli.c:15769:1: warning: no previous prototype for ‘lpfc_check_next_fcf_pri_level’ [-Wmissing-prototypes] drivers/scsi/lpfc/lpfc_sli.c:16000:1: warning: no previous prototype for ‘lpfc_mbx_cmpl_redisc_fcf_table’ [-Wmissing-prototypes] Signed-off-by: Rashika Kheria Reviewed-by: Josh Triplett Reviewed-by: James Smart Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_sli.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 32ada0505576..04a8b749b67a 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -14270,7 +14270,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba, * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful * LPFC_RPI_ALLOC_ERROR if no rpis are available. **/ -uint16_t +static uint16_t lpfc_sli4_alloc_xri(struct lpfc_hba *phba) { unsigned long xri; @@ -14300,7 +14300,7 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba) * This routine is invoked to release an xri to the pool of * available rpis maintained by the driver. **/ -void +static void __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) { if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { @@ -14720,7 +14720,7 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, * the driver uses this time stamp to indicate if any received sequences have * timed out. **/ -void +static void lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) { struct lpfc_dmabuf *h_buf; @@ -15189,7 +15189,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, * unsolicited sequence has been aborted. After that, it will issue a basic * accept to accept the abort. **/ -void +static void lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) { @@ -15734,7 +15734,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) * This routine is invoked to release an rpi to the pool of * available rpis maintained by the driver. **/ -void +static void __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) { if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { @@ -16172,7 +16172,7 @@ fail_fcf_read: * returns: * 1=success 0=failure **/ -int +static int lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) { uint16_t next_fcf_pri; @@ -16403,7 +16403,7 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) * command. If the mailbox command returned failure, it will try to stop the * FCF rediscover wait timer. 
**/ -void +static void lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) { struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; -- cgit From b86a675672e471b495b9de7f240d3786548d25e1 Mon Sep 17 00:00:00 2001 From: Rashika Kheria Date: Wed, 3 Sep 2014 12:55:17 -0400 Subject: lpfc: mark functions as static in lpfc/lpfc_hbadisc.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mark functions as static in lpfc/lpfc_hbadisc.c because they are not used outside this file. This eliminates the following warnings in lpfc/lpfc_hbadisc.c: drivers/scsi/lpfc/lpfc_hbadisc.c:2047:5: warning: no previous prototype for ‘lpfc_sli4_fcf_pri_list_add’ [-Wmissing-prototypes] drivers/scsi/lpfc/lpfc_hbadisc.c:2681:1: warning: no previous prototype for ‘lpfc_init_vfi_cmpl’ [-Wmissing-prototypes] drivers/scsi/lpfc/lpfc_hbadisc.c:4432:1: warning: no previous prototype for ‘lpfc_nlp_logo_unreg’ [-Wmissing-prototypes] Signed-off-by: Rashika Kheria Reviewed-by: Josh Triplett Reviewed-by: James Smart Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_hbadisc.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 2a17e31265b8..667d124e3608 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -2042,7 +2042,8 @@ lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index) * returns: * 0=success 1=failure **/ -int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index, +static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, + uint16_t fcf_index, struct fcf_record *new_fcf_record) { uint16_t current_fcf_pri; @@ -2678,7 +2679,7 @@ out: * * This function handles completion of init vfi mailbox command. */ -void +static void lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct lpfc_vport *vport = mboxq->vport; @@ -4438,7 +4439,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) * This function will issue an ELS LOGO command after completing * the UNREG_RPI. **/ -void +static void lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; -- cgit From e399b22881c721626812008e25d930ac4d2bafc4 Mon Sep 17 00:00:00 2001 From: Rashika Kheria Date: Wed, 3 Sep 2014 12:55:28 -0400 Subject: lpfc: mark functions as static in lpfc/lpfc_init.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mark functions as static in lpfc/lpfc_init.c because they are not used outside this file. 
This eliminates the following warning in lpfc/lpfc_init.c: drivers/scsi/lpfc/lpfc_init.c:652:1: warning: no previous prototype for ‘lpfc_hba_init_link’ [-Wmissing-prototypes] drivers/scsi/lpfc/lpfc_init.c:753:1: warning: no previous prototype for ‘lpfc_hba_down_link’ [-Wmissing-prototypes] drivers/scsi/lpfc/lpfc_init.c:3434:1: warning: no previous prototype for ‘lpfc_sli4_fcf_redisc_wait_tmo’ [-Wmissing-prototypes] Signed-off-by: Rashika Kheria Reviewed-by: Josh Triplett Reviewed-by: James Smart Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_init.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index a5769a9960ac..60d9518a3e6b 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -649,7 +649,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) * 0 - success * Any other value - error **/ -int +static int lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) { return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); @@ -750,7 +750,7 @@ lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, * 0 - success * Any other value - error **/ -int +static int lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) { LPFC_MBOXQ_t *pmb; @@ -3550,7 +3550,7 @@ lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) * list, and then worker thread shall be waked up for processing from the * worker thread context. **/ -void +static void lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) { struct lpfc_hba *phba = (struct lpfc_hba *)ptr; -- cgit From 7bfe781edc26b01ebecef58be0aae91058544c9f Mon Sep 17 00:00:00 2001 From: Rashika Kheria Date: Wed, 3 Sep 2014 12:55:36 -0400 Subject: lpfc: mark functions as static in lpfc/lpfc_scsi.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mark functions as static in lpfc/lpfc_scsi.c because they are not used outside this file. This eliminates the following warnings in lpfc/lpfc_scsi.c: drivers/scsi/lpfc/lpfc_scsi.c:299:1: warning: no previous prototype for ‘lpfc_change_queue_depth’ [-Wmissing-prototypes] drivers/scsi/lpfc/lpfc_scsi.c:795:1: warning: no previous prototype for ‘lpfc_sli4_post_scsi_sgl_list’ [-Wmissing-prototypes] drivers/scsi/lpfc/lpfc_scsi.c:3019:1: warning: no previous prototype for ‘lpfc_bg_crc’ [-Wmissing-prototypes] drivers/scsi/lpfc/lpfc_scsi.c:3035:1: warning: no previous prototype for ‘lpfc_bg_csum’ [-Wmissing-prototypes] drivers/scsi/lpfc/lpfc_scsi.c:3048:1: warning: no previous prototype for ‘lpfc_calc_bg_err’ [-Wmissing-prototypes] Signed-off-by: Rashika Kheria Reviewed-by: Josh Triplett Reviewed-by: James Smart Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_scsi.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 7862c5540861..3c250e3e41ca 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -306,7 +306,7 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba, * depth for a scsi device. This function sets the queue depth to the new * value and sends an event out to log the queue depth change. **/ -int +static int lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) { struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; @@ -741,7 +741,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, * * Returns: 0 = failure, non-zero number of successfully posted buffers. 
**/ -int +static int lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, struct list_head *post_sblist, int sb_count) { @@ -2965,7 +2965,7 @@ err: * on the specified data using a CRC algorithmn * using crc_t10dif. */ -uint16_t +static uint16_t lpfc_bg_crc(uint8_t *data, int count) { uint16_t crc = 0; @@ -2981,7 +2981,7 @@ lpfc_bg_crc(uint8_t *data, int count) * on the specified data using a CSUM algorithmn * using ip_compute_csum. */ -uint16_t +static uint16_t lpfc_bg_csum(uint8_t *data, int count) { uint16_t ret; @@ -2994,7 +2994,7 @@ lpfc_bg_csum(uint8_t *data, int count) * This function examines the protection data to try to determine * what type of T10-DIF error occurred. */ -void +static void lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) { struct scatterlist *sgpe; /* s/g prot entry */ -- cgit From 9ab9b134a86ed9f897a29d2ba5abb93f7b162dca Mon Sep 17 00:00:00 2001 From: Rashika Kheria Date: Wed, 3 Sep 2014 12:55:46 -0400 Subject: lpfc: mark function as static in lpfc/lpfc_bsg.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [PATCH 26/55] scsi: Mark function as static in lpfc/lpfc_bsg.c Mark function as static in lpfc/lpfc_bsg.c because it is not used outside this file. This eliminates the following warning in lpfc/lpfc_bsg.c: drivers/scsi/lpfc/lpfc_bsg.c:3348:1: warning: no previous prototype for ‘lpfc_bsg_issue_mbox_cmpl’ [-Wmissing-prototypes] Signed-off-by: Rashika Kheria Reviewed-by: Josh Triplett Reviewed-by: James Smart Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_bsg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 5b5c825d9576..371474bec5a5 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -3344,7 +3344,7 @@ job_error: * will wake up thread waiting on the wait queue pointed by context1 * of the mailbox. 
**/ -void +static void lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct bsg_job_data *dd_data; -- cgit From 1aee383d5912de15af3045a63a07e98f760f041c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 3 Sep 2014 12:56:12 -0400 Subject: lpfc: use dma_zalloc_coherent Use the zeroing function instead of dma_alloc_coherent & memset(,0,) Signed-off-by: Joe Perches Reviewed-by: James Smart Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_bsg.c | 5 ++--- drivers/scsi/lpfc/lpfc_init.c | 22 +++++++--------------- drivers/scsi/lpfc/lpfc_mbox.c | 6 +++--- drivers/scsi/lpfc/lpfc_sli.c | 14 +++++--------- 4 files changed, 17 insertions(+), 30 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 371474bec5a5..d236448b05b9 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -2693,14 +2693,13 @@ lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba) INIT_LIST_HEAD(&dmabuf->list); /* now, allocate dma buffer */ - dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, - &(dmabuf->phys), GFP_KERNEL); + dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, + &(dmabuf->phys), GFP_KERNEL); if (!dmabuf->virt) { kfree(dmabuf); return NULL; } - memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE); return dmabuf; } diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 60d9518a3e6b..990c3a29c51f 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -5848,16 +5848,14 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) if (!dmabuf) return NULL; - dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, - LPFC_HDR_TEMPLATE_SIZE, - &dmabuf->phys, - GFP_KERNEL); + dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, + LPFC_HDR_TEMPLATE_SIZE, + &dmabuf->phys, GFP_KERNEL); if (!dmabuf->virt) { rpi_hdr = NULL; goto err_free_dmabuf; } - memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { rpi_hdr = NULL; goto err_free_coherent; @@ -6246,14 +6244,11 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) } /* Allocate memory for SLI-2 structures */ - phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, - SLI2_SLIM_SIZE, - &phba->slim2p.phys, - GFP_KERNEL); + phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, + &phba->slim2p.phys, GFP_KERNEL); if (!phba->slim2p.virt) goto out_iounmap; - memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); phba->mbox_ext = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx_ext_words)); @@ -6618,15 +6613,12 @@ lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) * plus an alignment restriction of 16 bytes. */ bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); - dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, - bmbx_size, - &dmabuf->phys, - GFP_KERNEL); + dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size, + &dmabuf->phys, GFP_KERNEL); if (!dmabuf->virt) { kfree(dmabuf); return -ENOMEM; } - memset(dmabuf->virt, 0, bmbx_size); /* * Initialize the bootstrap mailbox pointers now so that the register diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 1f292e29d566..06241f590c1e 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1811,12 +1811,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, * page, this is used as a priori size of SLI4_PAGE_SIZE for * the later DMA memory free. 
*/ - viraddr = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, - &phyaddr, GFP_KERNEL); + viraddr = dma_zalloc_coherent(&phba->pcidev->dev, + SLI4_PAGE_SIZE, &phyaddr, + GFP_KERNEL); /* In case of malloc fails, proceed with whatever we have */ if (!viraddr) break; - memset(viraddr, 0, SLI4_PAGE_SIZE); mbox->sge_array->addr[pagen] = viraddr; /* Keep the first page for later sub-header construction */ if (pagen == 0) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 04a8b749b67a..27221cb21bb8 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -4864,15 +4864,12 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, * mailbox command. */ dma_size = *vpd_size; - dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, - dma_size, - &dmabuf->phys, - GFP_KERNEL); + dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size, + &dmabuf->phys, GFP_KERNEL); if (!dmabuf->virt) { kfree(dmabuf); return -ENOMEM; } - memset(dmabuf->virt, 0, dma_size); /* * The SLI4 implementation of READ_REV conflicts at word1, @@ -12760,14 +12757,13 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!dmabuf) goto out_fail; - dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, - hw_page_size, &dmabuf->phys, - GFP_KERNEL); + dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, + hw_page_size, &dmabuf->phys, + GFP_KERNEL); if (!dmabuf->virt) { kfree(dmabuf); goto out_fail; } - memset(dmabuf->virt, 0, hw_page_size); dmabuf->buffer_tag = x; list_add_tail(&dmabuf->list, &queue->page_list); /* initialize queue's entry array */ -- cgit From 0d4aec132d8ad6303981db08a52d64b0dbe59e97 Mon Sep 17 00:00:00 2001 From: Manuel Schölling Date: Wed, 3 Sep 2014 12:55:58 -0400 Subject: lpfc: use time_after() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To be future-proof and for better readability the time comparisons are modified to use time_after() instead of plain, error-prone math. Signed-off-by: Manuel Schölling Reviewed-by: James Smart Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_scsi.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 3c250e3e41ca..6094545883a3 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -380,12 +380,14 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba) { unsigned long flags; uint32_t evt_posted; + unsigned long expires; spin_lock_irqsave(&phba->hbalock, flags); atomic_inc(&phba->num_rsrc_err); phba->last_rsrc_error_time = jiffies; - if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) { + expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL; + if (time_after(expires, jiffies)) { spin_unlock_irqrestore(&phba->hbalock, flags); return; } -- cgit From a7901acc4ac0f853d9aff284ff96e4a56ff74aa8 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 3 Sep 2014 12:56:20 -0400 Subject: lpfc: do not feed jiffies as random seed from lpfc driver In prandom we have already reseeding mechanisms that trigger periodically from a much better entropy source than just feeding in jiffies through lpfc_mbx_cmpl_fcf_scan_read_fcf_rec() [what a function name 8-)]. Therefore, just remove this. 
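For context, a minimal sketch of what callers are left with (hypothetical helper, assuming the prandom_u32() interface of this kernel generation): the driver simply consumes pseudo-random numbers and relies on the core's periodic reseeding, with no per-driver seeding step at all:

	#include <linux/random.h>

	/* pick one of 'cnt' eligible FCF entries at random; no prandom_seed() required */
	static u16 pick_random_fcf(u16 cnt)
	{
		return cnt ? prandom_u32() % cnt : 0;
	}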
Signed-off-by: Daniel Borkmann Reviewed-by: James Smart Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_hbadisc.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 667d124e3608..859fffa739d8 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -2147,7 +2147,6 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) uint16_t fcf_index, next_fcf_index; struct lpfc_fcf_rec *fcf_rec = NULL; uint16_t vlan_id; - uint32_t seed; bool select_new_fcf; int rc; @@ -2384,9 +2383,6 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) phba->fcf.fcf_flag |= FCF_AVAILABLE; /* Setup initial running random FCF selection count */ phba->fcf.eligible_fcf_cnt = 1; - /* Seeding the random number generator for random selection */ - seed = (uint32_t)(0xFFFFFFFF & jiffies); - prandom_seed(seed); } spin_unlock_irq(&phba->hbalock); goto read_next_fcf; -- cgit From 4f871e1b27a7c7254ead541ad6405f339790b6c5 Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Wed, 3 Sep 2014 12:56:29 -0400 Subject: lpfc: Use pci_enable_msix_range() instead of pci_enable_msix() As result of deprecation of MSI-X/MSI enablement functions pci_enable_msix() and pci_enable_msi_block() all drivers using these two interfaces need to be updated to use the new pci_enable_msi_range() or pci_enable_msi_exact() and pci_enable_msix_range() or pci_enable_msix_exact() interfaces. Signed-off-by: Alexander Gordeev Reviewed-by: James Smart Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_init.c | 39 +++++++++++++++++---------------------- 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 990c3a29c51f..1953b3bbd60a 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -8203,9 +8203,9 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the MSI-X interrupt vectors to device - * with SLI-3 interface specs. The kernel function pci_enable_msix() is - * called to enable the MSI-X vectors. Note that pci_enable_msix(), once - * invoked, enables either all or nothing, depending on the current + * with SLI-3 interface specs. The kernel function pci_enable_msix_exact() + * is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(), + * once invoked, enables either all or nothing, depending on the current * availability of PCI vector resources. The device driver is responsible * for calling the individual request_irq() to register each MSI-X vector * with a interrupt handler, which is done in this function. Note that @@ -8229,8 +8229,8 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba) phba->msix_entries[i].entry = i; /* Configure MSI-X capability structure */ - rc = pci_enable_msix(phba->pcidev, phba->msix_entries, - ARRAY_SIZE(phba->msix_entries)); + rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries, + LPFC_MSIX_VECTORS); if (rc) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0420 PCI enable MSI-X failed (%d)\n", rc); @@ -8767,16 +8767,14 @@ out: * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the MSI-X interrupt vectors to device - * with SLI-4 interface spec. The kernel function pci_enable_msix() is called - * to enable the MSI-X vectors. 
Note that pci_enable_msix(), once invoked, - * enables either all or nothing, depending on the current availability of - * PCI vector resources. The device driver is responsible for calling the - * individual request_irq() to register each MSI-X vector with a interrupt - * handler, which is done in this function. Note that later when device is - * unloading, the driver should always call free_irq() on all MSI-X vectors - * it has done request_irq() on before calling pci_disable_msix(). Failure - * to do so results in a BUG_ON() and a device will be left with MSI-X - * enabled and leaks its vectors. + * with SLI-4 interface spec. The kernel function pci_enable_msix_range() + * is called to enable the MSI-X vectors. The device driver is responsible + * for calling the individual request_irq() to register each MSI-X vector + * with a interrupt handler, which is done in this function. Note that + * later when device is unloading, the driver should always call free_irq() + * on all MSI-X vectors it has done request_irq() on before calling + * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device + * will be left with MSI-X enabled and leaks its vectors. * * Return codes * 0 - successful @@ -8797,17 +8795,14 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) phba->sli4_hba.msix_entries[index].entry = index; vectors++; } -enable_msix_vectors: - rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, - vectors); - if (rc > 1) { - vectors = rc; - goto enable_msix_vectors; - } else if (rc) { + rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries, + 2, vectors); + if (rc < 0) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0484 PCI enable MSI-X failed (%d)\n", rc); goto vec_fail_out; } + vectors = rc; /* Log MSI-X vector assignment */ for (index = 0; index < vectors; index++) -- cgit From dafe8ceaa89577062c2364139997f04a32f77502 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 3 Sep 2014 12:56:40 -0400 Subject: lpfc: fix discovery timeout during nameserver login Fix discovery timeout during nameserver login Signed-off-by: James Smart Signed-off-by: Dick Kennedy Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_els.c | 5 +++++ drivers/scsi/lpfc/lpfc_init.c | 15 +++++++++++++++ drivers/scsi/lpfc/lpfc_sli.c | 10 +++++++++- 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 7a5d81a65be8..30ec80f32d1a 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -8187,9 +8187,11 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, list_del(&sglq_entry->list); ndlp = sglq_entry->ndlp; sglq_entry->ndlp = NULL; + spin_lock(&pring->ring_lock); list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); sglq_entry->state = SGL_FREED; + spin_unlock(&pring->ring_lock); spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); lpfc_set_rrq_active(phba, ndlp, @@ -8208,12 +8210,15 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, spin_unlock_irqrestore(&phba->hbalock, iflag); return; } + spin_lock(&pring->ring_lock); sglq_entry = __lpfc_get_active_sglq(phba, lxri); if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { + spin_unlock(&pring->ring_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); return; } sglq_entry->state = SGL_XRI_ABORTED; + spin_unlock(&pring->ring_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); return; } diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 
1953b3bbd60a..7f54916c4f62 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -988,9 +988,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) LIST_HEAD(aborts); unsigned long iflag = 0; struct lpfc_sglq *sglq_entry = NULL; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; lpfc_hba_free_post_buf(phba); lpfc_hba_clean_txcmplq(phba); + pring = &psli->ring[LPFC_ELS_RING]; /* At this point in time the HBA is either reset or DOA. Either * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be @@ -1008,8 +1011,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) &phba->sli4_hba.lpfc_abts_els_sgl_list, list) sglq_entry->state = SGL_FREED; + spin_lock(&pring->ring_lock); list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); + spin_unlock(&pring->ring_lock); spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); /* abts_scsi_buf_list_lock required because worker thread uses this * list. @@ -3047,6 +3052,7 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) LIST_HEAD(els_sgl_list); LIST_HEAD(scsi_sgl_list); int rc; + struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; /* * update on pci function's els xri-sgl list @@ -3087,7 +3093,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) list_add_tail(&sglq_entry->list, &els_sgl_list); } spin_lock_irq(&phba->hbalock); + spin_lock(&pring->ring_lock); list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); + spin_unlock(&pring->ring_lock); spin_unlock_irq(&phba->hbalock); } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { /* els xri-sgl shrinked */ @@ -3097,7 +3105,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) "%d to %d\n", phba->sli4_hba.els_xri_cnt, els_xri_cnt); spin_lock_irq(&phba->hbalock); + spin_lock(&pring->ring_lock); list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list); + spin_unlock(&pring->ring_lock); spin_unlock_irq(&phba->hbalock); /* release extra els sgls from list */ for (i = 0; i < xri_cnt; i++) { @@ -3110,7 +3120,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) } } spin_lock_irq(&phba->hbalock); + spin_lock(&pring->ring_lock); list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); + spin_unlock(&pring->ring_lock); spin_unlock_irq(&phba->hbalock); } else lpfc_printf_log(phba, KERN_INFO, LOG_SLI, @@ -5680,10 +5692,13 @@ static void lpfc_free_els_sgl_list(struct lpfc_hba *phba) { LIST_HEAD(sglq_list); + struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; /* Retrieve all els sgls from driver list */ spin_lock_irq(&phba->hbalock); + spin_lock(&pring->ring_lock); list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); + spin_unlock(&pring->ring_lock); spin_unlock_irq(&phba->hbalock); /* Now free the sgl list */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 27221cb21bb8..8f3be2a5dc3f 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -937,7 +937,7 @@ out: * @phba: Pointer to HBA context object. * @piocb: Pointer to the iocbq. * - * This function is called with hbalock held. This function + * This function is called with the ring lock held. This function * gets a new driver sglq object from the sglq list. If the * list is not empty then it is successful, it returns pointer to the newly * allocated sglq object else it returns NULL. 
@@ -1053,10 +1053,12 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) spin_unlock_irqrestore( &phba->sli4_hba.abts_sgl_list_lock, iflag); } else { + spin_lock_irqsave(&pring->ring_lock, iflag); sglq->state = SGL_FREED; sglq->ndlp = NULL; list_add_tail(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); + spin_unlock_irqrestore(&pring->ring_lock, iflag); /* Check if TXQ queue needs to be serviced */ if (!list_empty(&pring->txq)) @@ -6098,14 +6100,18 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) struct lpfc_sglq *sglq_entry_first = NULL; int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0; int last_xritag = NO_XRI; + struct lpfc_sli_ring *pring; LIST_HEAD(prep_sgl_list); LIST_HEAD(blck_sgl_list); LIST_HEAD(allc_sgl_list); LIST_HEAD(post_sgl_list); LIST_HEAD(free_sgl_list); + pring = &phba->sli.ring[LPFC_ELS_RING]; spin_lock_irq(&phba->hbalock); + spin_lock(&pring->ring_lock); list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); + spin_unlock(&pring->ring_lock); spin_unlock_irq(&phba->hbalock); total_cnt = phba->sli4_hba.els_xri_cnt; @@ -6207,8 +6213,10 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) /* push els sgls posted to the availble list */ if (!list_empty(&post_sgl_list)) { spin_lock_irq(&phba->hbalock); + spin_lock(&pring->ring_lock); list_splice_init(&post_sgl_list, &phba->sli4_hba.lpfc_sgl_list); + spin_unlock(&pring->ring_lock); spin_unlock_irq(&phba->hbalock); } else { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, -- cgit From c62321978f542e82960264f175c9e33d7279bec5 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 3 Sep 2014 12:56:48 -0400 Subject: lpfc: fix quarantined XRI recovery qualifier state in link bounce Fix quarantined XRI recovery qualifier state in link bounce Signed-off-by: James Smart Signed-off-by: Dick Kennedy Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_crtn.h | 1 - drivers/scsi/lpfc/lpfc_hbadisc.c | 1 - drivers/scsi/lpfc/lpfc_sli.c | 36 ------------------------------------ 3 files changed, 38 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index db5604f01a1a..00665a5d92fd 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -451,7 +451,6 @@ int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *); int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, uint16_t, uint16_t, uint16_t); uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t); -void lpfc_cleanup_wt_rrqs(struct lpfc_hba *); void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *); struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t, uint32_t); diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 859fffa739d8..d178aee02b49 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -995,7 +995,6 @@ lpfc_linkup(struct lpfc_hba *phba) struct lpfc_vport **vports; int i; - lpfc_cleanup_wt_rrqs(phba); phba->link_state = LPFC_LINK_UP; /* Unblock fabric iocbs if they are blocked */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 8f3be2a5dc3f..deb7f126a2f1 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -785,42 +785,6 @@ lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } } -/** - * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list. - * @phba: Pointer to HBA context object. 
- * - * Remove all rrqs from the phba->active_rrq_list and free them by - * calling __lpfc_clr_active_rrq - * - **/ -void -lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) -{ - struct lpfc_node_rrq *rrq; - struct lpfc_node_rrq *nextrrq; - unsigned long next_time; - unsigned long iflags; - LIST_HEAD(rrq_list); - - if (phba->sli_rev != LPFC_SLI_REV4) - return; - spin_lock_irqsave(&phba->hbalock, iflags); - phba->hba_flag &= ~HBA_RRQ_ACTIVE; - next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)); - list_splice_init(&phba->active_rrq_list, &rrq_list); - spin_unlock_irqrestore(&phba->hbalock, iflags); - - list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { - list_del(&rrq->list); - lpfc_clr_rrq_active(phba, rrq->xritag, rrq); - } - if ((!list_empty(&phba->active_rrq_list)) && - (!(phba->pport->load_flag & FC_UNLOADING))) - - mod_timer(&phba->rrq_tmr, next_time); -} - - /** * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. * @phba: Pointer to HBA context object. -- cgit From 2f6fa2c911167e7a3fda130689a36f55b39ed86d Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 3 Sep 2014 12:57:08 -0400 Subject: lpfc: fix IP Reset processing - wait for RDY before proceeding Fix IP Reset processing - wait for RDY before proceeding Signed-off-by: James Smart Signed-off-by: Dick Kennedy Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_init.c | 109 ++++++++++++++++++------------------------ 1 file changed, 47 insertions(+), 62 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 7f54916c4f62..33a24fc0afec 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -7903,7 +7903,8 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) LPFC_MBOXQ_t *mboxq; uint32_t rc = 0, if_type; uint32_t shdr_status, shdr_add_status; - uint32_t rdy_chk, num_resets = 0, reset_again = 0; + uint32_t rdy_chk; + uint32_t port_reset = 0; union lpfc_sli4_cfg_shdr *shdr; struct lpfc_register reg_data; uint16_t devid; @@ -7943,9 +7944,42 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) } break; case LPFC_SLI_INTF_IF_TYPE_2: - for (num_resets = 0; - num_resets < MAX_IF_TYPE_2_RESETS; - num_resets++) { +wait: + /* + * Poll the Port Status Register and wait for RDY for + * up to 30 seconds. If the port doesn't respond, treat + * it as an error. + */ + for (rdy_chk = 0; rdy_chk < 3000; rdy_chk++) { + if (lpfc_readl(phba->sli4_hba.u.if_type2. + STATUSregaddr, ®_data.word0)) { + rc = -ENODEV; + goto out; + } + if (bf_get(lpfc_sliport_status_rdy, ®_data)) + break; + msleep(20); + } + + if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { + phba->work_status[0] = readl( + phba->sli4_hba.u.if_type2.ERR1regaddr); + phba->work_status[1] = readl( + phba->sli4_hba.u.if_type2.ERR2regaddr); + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2890 Port not ready, port status reg " + "0x%x error 1=0x%x, error 2=0x%x\n", + reg_data.word0, + phba->work_status[0], + phba->work_status[1]); + rc = -ENODEV; + goto out; + } + + if (!port_reset) { + /* + * Reset the port now + */ reg_data.word0 = 0; bf_set(lpfc_sliport_ctrl_end, ®_data, LPFC_SLIPORT_LITTLE_ENDIAN); @@ -7956,64 +7990,16 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) /* flush */ pci_read_config_word(phba->pcidev, PCI_DEVICE_ID, &devid); - /* - * Poll the Port Status Register and wait for RDY for - * up to 10 seconds. If the port doesn't respond, treat - * it as an error. If the port responds with RN, start - * the loop again. 
- */ - for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { - msleep(10); - if (lpfc_readl(phba->sli4_hba.u.if_type2. - STATUSregaddr, ®_data.word0)) { - rc = -ENODEV; - goto out; - } - if (bf_get(lpfc_sliport_status_rn, ®_data)) - reset_again++; - if (bf_get(lpfc_sliport_status_rdy, ®_data)) - break; - } - - /* - * If the port responds to the init request with - * reset needed, delay for a bit and restart the loop. - */ - if (reset_again && (rdy_chk < 1000)) { - msleep(10); - reset_again = 0; - continue; - } - /* Detect any port errors. */ - if ((bf_get(lpfc_sliport_status_err, ®_data)) || - (rdy_chk >= 1000)) { - phba->work_status[0] = readl( - phba->sli4_hba.u.if_type2.ERR1regaddr); - phba->work_status[1] = readl( - phba->sli4_hba.u.if_type2.ERR2regaddr); - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2890 Port error detected during port " - "reset(%d): wait_tmo:%d ms, " - "port status reg 0x%x, " - "error 1=0x%x, error 2=0x%x\n", - num_resets, rdy_chk*10, - reg_data.word0, - phba->work_status[0], - phba->work_status[1]); - rc = -ENODEV; - } - - /* - * Terminate the outer loop provided the Port indicated - * ready within 10 seconds. - */ - if (rdy_chk < 1000) - break; + port_reset = 1; + msleep(20); + goto wait; + } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { + rc = -ENODEV; + goto out; } - /* delay driver action following IF_TYPE_2 function reset */ - msleep(100); break; + case LPFC_SLI_INTF_IF_TYPE_1: default: break; @@ -8021,11 +8007,10 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) out: /* Catch the not-ready port failure after a port reset. */ - if (num_resets >= MAX_IF_TYPE_2_RESETS) { + if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3317 HBA not functional: IP Reset Failed " - "after (%d) retries, try: " - "echo fw_reset > board_mode\n", num_resets); + "try: echo fw_reset > board_mode\n"); rc = -ENODEV; } -- cgit From 12838e74f5164054fd7d5f5201a846ebb9755471 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 3 Sep 2014 12:57:19 -0400 Subject: lpfc: fix race between LOGO/PLOGI handling causing NULL pointer Fix race between LOGO/PLOGI handling causing NULL pointer Signed-off-by: James Smart Signed-off-by: Dick Kennedy Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_disc.h | 6 +++++- drivers/scsi/lpfc/lpfc_els.c | 7 +++++++ drivers/scsi/lpfc/lpfc_hbadisc.c | 19 ++++++++++++++++++- 3 files changed, 30 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 1a6fe524940d..6977027979be 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -78,7 +78,8 @@ struct lpfc_nodelist { struct list_head nlp_listp; struct lpfc_name nlp_portname; struct lpfc_name nlp_nodename; - uint32_t nlp_flag; /* entry flags */ + uint32_t nlp_flag; /* entry flags */ + uint32_t nlp_add_flag; /* additional flags */ uint32_t nlp_DID; /* FC D_ID of entry */ uint32_t nlp_last_elscmd; /* Last ELS cmd sent */ uint16_t nlp_type; @@ -157,6 +158,9 @@ struct lpfc_node_rrq { #define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */ #define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */ +/* Defines for nlp_add_flag (uint32) */ +#define NLP_IN_DEV_LOSS 0x00000001 /* Dev Loss processing in progress */ + /* ndlp usage management macros */ #define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ & NLP_USG_NODE_ACT_BIT) \ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 30ec80f32d1a..9d03e7250fb4 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ 
-6693,6 +6693,13 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvFrame++; + /* + * Do not process any unsolicited ELS commands + * if the ndlp is in DEV_LOSS + */ + if (ndlp->nlp_add_flag & NLP_IN_DEV_LOSS) + goto dropit; + elsiocb->context1 = lpfc_nlp_get(ndlp); elsiocb->vport = vport; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index d178aee02b49..2d929a5e8354 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -153,6 +153,16 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) put_device(&rport->dev); return; } + + put_node = rdata->pnode != NULL; + put_rport = ndlp->rport != NULL; + rdata->pnode = NULL; + ndlp->rport = NULL; + if (put_node) + lpfc_nlp_put(ndlp); + if (put_rport) + put_device(&rport->dev); + return; } evtp = &ndlp->dev_loss_evt; @@ -161,6 +171,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) return; evtp->evt_arg1 = lpfc_nlp_get(ndlp); + ndlp->nlp_add_flag |= NLP_IN_DEV_LOSS; spin_lock_irq(&phba->hbalock); /* We need to hold the node by incrementing the reference @@ -201,8 +212,10 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) rport = ndlp->rport; - if (!rport) + if (!rport) { + ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS; return fcf_inuse; + } rdata = rport->dd_data; name = (uint8_t *) &ndlp->nlp_portname; @@ -235,6 +248,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) put_rport = ndlp->rport != NULL; rdata->pnode = NULL; ndlp->rport = NULL; + ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS; if (put_node) lpfc_nlp_put(ndlp); if (put_rport) @@ -250,6 +264,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID); + ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS; return fcf_inuse; } @@ -259,6 +274,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) put_rport = ndlp->rport != NULL; rdata->pnode = NULL; ndlp->rport = NULL; + ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS; if (put_node) lpfc_nlp_put(ndlp); if (put_rport) @@ -297,6 +313,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) put_rport = ndlp->rport != NULL; rdata->pnode = NULL; ndlp->rport = NULL; + ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS; if (put_node) lpfc_nlp_put(ndlp); if (put_rport) -- cgit From 9bd2bff5e7140beab948ad3934f4039246748a24 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 3 Sep 2014 12:57:30 -0400 Subject: lpfc: fix locking issues with abort data paths Fix locking issues with abort data paths Signed-off-by: James Smart Signed-off-by: Dick Kennedy Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_scsi.c | 12 ++++- drivers/scsi/lpfc/lpfc_sli.c | 118 +++++++++++++++++++++++++----------------- drivers/scsi/lpfc/lpfc_sli.h | 1 + 3 files changed, 83 insertions(+), 48 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 6094545883a3..0f6be8560508 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -3466,7 +3466,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) */ if ((phba->cfg_fof) && ((struct lpfc_device_data *) scsi_cmnd->device->hostdata)->oas_enabled) - lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS; + lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); return 0; } @@ -3606,6 +3606,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, */ iocb_cmd->un.fcpi.fcpi_parm = fcpdl; + /* + * If the OAS driver feature is enabled and the lun is enabled 
for + * OAS, set the oas iocb related flags. + */ + if ((phba->cfg_fof) && ((struct lpfc_device_data *) + scsi_cmnd->device->hostdata)->oas_enabled) + lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); + return 0; err: if (lpfc_cmd->seg_cnt) @@ -4876,6 +4884,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) /* ABTS WQE must go to the same WQ as the WQE to be aborted */ abtsiocb->fcp_wqidx = iocb->fcp_wqidx; abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; + if (iocb->iocb_flag & LPFC_IO_FOF) + abtsiocb->iocb_flag |= LPFC_IO_FOF; if (lpfc_is_link_up(phba)) icmd->ulpCommand = CMD_ABORT_XRI_CN; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index deb7f126a2f1..7185aac24324 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -8753,6 +8753,37 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) return 0; } +int +lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number, + struct lpfc_iocbq *piocb) +{ + uint32_t idx; + + if (phba->sli_rev == LPFC_SLI_REV4) { + if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { + /* + * fcp_wqidx should already be setup based on what + * completion queue we want to use. + */ + if (!(phba->cfg_fof) || + (!(piocb->iocb_flag & LPFC_IO_FOF))) { + if (unlikely(!phba->sli4_hba.fcp_wq)) + return LPFC_HBA_ERROR; + idx = lpfc_sli4_scmd_to_wqidx_distr(phba); + piocb->fcp_wqidx = idx; + ring_number = MAX_SLI3_CONFIGURED_RINGS + idx; + } else { + if (unlikely(!phba->sli4_hba.oas_wq)) + return LPFC_HBA_ERROR; + idx = 0; + piocb->fcp_wqidx = idx; + ring_number = LPFC_FCP_OAS_RING; + } + } + } + return ring_number; +} + /** * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb * @phba: Pointer to HBA context object. @@ -8778,61 +8809,42 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, int rc, idx; if (phba->sli_rev == LPFC_SLI_REV4) { - if (piocb->iocb_flag & LPFC_IO_FCP) { - if (!phba->cfg_fof || (!(piocb->iocb_flag & - LPFC_IO_OAS))) { - if (unlikely(!phba->sli4_hba.fcp_wq)) - return IOCB_ERROR; - idx = lpfc_sli4_scmd_to_wqidx_distr(phba); - piocb->fcp_wqidx = idx; - ring_number = MAX_SLI3_CONFIGURED_RINGS + idx; - } else { - if (unlikely(!phba->sli4_hba.oas_wq)) - return IOCB_ERROR; - idx = 0; - piocb->fcp_wqidx = 0; - ring_number = LPFC_FCP_OAS_RING; - } - pring = &phba->sli.ring[ring_number]; - spin_lock_irqsave(&pring->ring_lock, iflags); - rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, - flag); - spin_unlock_irqrestore(&pring->ring_lock, iflags); + ring_number = lpfc_sli_calc_ring(phba, ring_number, piocb); + if (unlikely(ring_number == LPFC_HBA_ERROR)) + return IOCB_ERROR; + idx = piocb->fcp_wqidx; - if (lpfc_fcp_look_ahead) { - fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx]; + pring = &phba->sli.ring[ring_number]; + spin_lock_irqsave(&pring->ring_lock, iflags); + rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); + spin_unlock_irqrestore(&pring->ring_lock, iflags); - if (atomic_dec_and_test(&fcp_eq_hdl-> - fcp_eq_in_use)) { + if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) { + fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx]; - /* Get associated EQ with this index */ - fpeq = phba->sli4_hba.hba_eq[idx]; + if (atomic_dec_and_test(&fcp_eq_hdl-> + fcp_eq_in_use)) { - /* Turn off interrupts from this EQ */ - lpfc_sli4_eq_clr_intr(fpeq); + /* Get associated EQ with this index */ + fpeq = phba->sli4_hba.hba_eq[idx]; - /* - * Process all the events on FCP EQ - */ - while ((eqe = lpfc_sli4_eq_get(fpeq))) { - lpfc_sli4_hba_handle_eqe(phba, - 
eqe, idx); - fpeq->EQ_processed++; - } + /* Turn off interrupts from this EQ */ + lpfc_sli4_eq_clr_intr(fpeq); - /* Always clear and re-arm the EQ */ - lpfc_sli4_eq_release(fpeq, - LPFC_QUEUE_REARM); + /* + * Process all the events on FCP EQ + */ + while ((eqe = lpfc_sli4_eq_get(fpeq))) { + lpfc_sli4_hba_handle_eqe(phba, + eqe, idx); + fpeq->EQ_processed++; } - atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); - } - } else { - pring = &phba->sli.ring[ring_number]; - spin_lock_irqsave(&pring->ring_lock, iflags); - rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, - flag); - spin_unlock_irqrestore(&pring->ring_lock, iflags); + /* Always clear and re-arm the EQ */ + lpfc_sli4_eq_release(fpeq, + LPFC_QUEUE_REARM); + } + atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); } } else { /* For now, SLI2/3 will still use hbalock */ @@ -9715,6 +9727,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *abtsiocbp; IOCB_t *icmd = NULL; IOCB_t *iabt = NULL; + int ring_number; int retval; unsigned long iflags; @@ -9755,6 +9768,8 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; if (cmdiocb->iocb_flag & LPFC_IO_FCP) abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; + if (cmdiocb->iocb_flag & LPFC_IO_FOF) + abtsiocbp->iocb_flag |= LPFC_IO_FOF; if (phba->link_state >= LPFC_LINK_UP) iabt->ulpCommand = CMD_ABORT_XRI_CN; @@ -9771,6 +9786,11 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, abtsiocbp->iotag); if (phba->sli_rev == LPFC_SLI_REV4) { + ring_number = + lpfc_sli_calc_ring(phba, pring->ringno, abtsiocbp); + if (unlikely(ring_number == LPFC_HBA_ERROR)) + return 0; + pring = &phba->sli.ring[ring_number]; /* Note: both hbalock and ring_lock need to be set here */ spin_lock_irqsave(&pring->ring_lock, iflags); retval = __lpfc_sli_issue_iocb(phba, pring->ringno, @@ -10068,6 +10088,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; if (iocbq->iocb_flag & LPFC_IO_FCP) abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; + if (iocbq->iocb_flag & LPFC_IO_FOF) + abtsiocb->iocb_flag |= LPFC_IO_FOF; if (lpfc_is_link_up(phba)) abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; @@ -10167,6 +10189,8 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx; if (iocbq->iocb_flag & LPFC_IO_FCP) abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; + if (iocbq->iocb_flag & LPFC_IO_FOF) + abtsiocbq->iocb_flag |= LPFC_IO_FOF; if (lpfc_is_link_up(phba)) abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index edb48832c39b..4a01452415cf 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -79,6 +79,7 @@ struct lpfc_iocbq { #define LPFC_FIP_ELS_ID_SHIFT 14 #define LPFC_IO_OAS 0x10000 /* OAS FCP IO */ +#define LPFC_IO_FOF 0x20000 /* FOF FCP IO */ uint32_t drvrTimeout; /* driver timeout in seconds */ uint32_t fcp_wqidx; /* index to FCP work queue */ -- cgit From 7ba36effb666831ac3803ca5b8aed371e7d17c4e Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 3 Sep 2014 12:57:43 -0400 Subject: lpfc: fix crash from page fault caused by use after rport delete Fix crash from page fault caused by use after rport delete. 
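In outline (a simplified sketch of the pattern in the hunk below, not the literal diff): when the rport and the ndlp are found to no longer belong together, the fix severs both back-pointers before dropping the references they held, so later dev-loss or discovery paths cannot dereference a deleted rport through a stale ndlp->rport:

	/* simplified; see the lpfc_dev_loss_tmo_callbk hunk below */
	put_node = (rdata->pnode != NULL);
	rdata->pnode = NULL;		/* break rport -> ndlp link */
	ndlp->rport = NULL;		/* break ndlp -> rport link */
	if (put_node)
		lpfc_nlp_put(ndlp);	/* drop the node reference the rport held */
	put_device(&rport->dev);	/* release the rport device reference */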
Signed-off-by: James Smart Signed-off-by: Dick Kennedy Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_hbadisc.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 2d929a5e8354..310507dede2a 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -150,6 +150,17 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) /* If the WWPN of the rport and ndlp don't match, ignore it */ if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, + "6789 rport name %lx != node port name %lx", + (unsigned long)rport->port_name, + (unsigned long)wwn_to_u64( + ndlp->nlp_portname.u.wwn)); + put_node = rdata->pnode != NULL; + put_rport = ndlp->rport != NULL; + rdata->pnode = NULL; + ndlp->rport = NULL; + if (put_node) + lpfc_nlp_put(ndlp); put_device(&rport->dev); return; } @@ -285,6 +296,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) if (ndlp->nlp_sid != NLP_NO_SID) { warn_on = 1; /* flush the target */ + ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS; lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], ndlp->nlp_sid, 0, LPFC_CTX_TGT); } -- cgit From a2fc4aefa06d8b57b6728c1787c84fb3d3c86354 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 3 Sep 2014 12:57:55 -0400 Subject: lpfc: fix high priority issues from fortify source code scan Fixed High priority issues from lpfc given by fortify source code scan. Signed-off-by: James Smart Signed-off-by: Dick Kennedy Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_bsg.c | 8 +++++--- drivers/scsi/lpfc/lpfc_els.c | 5 ++++- drivers/scsi/lpfc/lpfc_init.c | 32 +++++++++++++++++++------------- drivers/scsi/lpfc/lpfc_nportdisc.c | 2 ++ drivers/scsi/lpfc/lpfc_sli.c | 26 +++++++++++++------------- drivers/scsi/lpfc/lpfc_sli4.h | 20 ++++++++++---------- 6 files changed, 53 insertions(+), 40 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index d236448b05b9..a58bffb3f68f 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -2827,8 +2827,10 @@ diag_cmd_data_alloc(struct lpfc_hba *phba, size -= cnt; } - mlist->flag = i; - return mlist; + if (mlist) { + mlist->flag = i; + return mlist; + } out: diag_cmd_data_free(phba, mlist); return NULL; @@ -4592,7 +4594,7 @@ sli_cfg_ext_error: * being reset) and com-plete the job, otherwise issue the mailbox command and * let our completion handler finish the command. **/ -static uint32_t +static int lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, struct lpfc_vport *vport) { diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 9d03e7250fb4..c7e508085413 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1084,7 +1084,8 @@ stop_rr_fcf_flogi: * accessing it. 
*/ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); - + if (!prsp) + goto out; sp = prsp->virt + sizeof(uint32_t); /* FLOGI completes successfully */ @@ -7521,6 +7522,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); + if (!prsp) + goto out; sp = prsp->virt + sizeof(uint32_t); fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); memcpy(&vport->fabric_portname, &sp->portName, diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 33a24fc0afec..0d9230486d6e 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -306,10 +306,10 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) dist = dist_char[prg->dist]; if ((prg->dist == 3) && (prg->num == 0)) - sprintf(phba->OptionROMVersion, "%d.%d%d", + snprintf(phba->OptionROMVersion, 32, "%d.%d%d", prg->ver, prg->rev, prg->lev); else - sprintf(phba->OptionROMVersion, "%d.%d%d%c%d", + snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d", prg->ver, prg->rev, prg->lev, dist, prg->num); mempool_free(pmboxq, phba->mbox_mem_pool); @@ -3177,9 +3177,11 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) for (i = 0; i < scsi_xri_cnt; i++) { list_remove_head(&scsi_sgl_list, psb, struct lpfc_scsi_buf, list); - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data, - psb->dma_handle); - kfree(psb); + if (psb) { + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + psb->data, psb->dma_handle); + kfree(psb); + } } spin_lock_irq(&phba->scsi_buf_list_get_lock); phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; @@ -7424,7 +7426,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0523 Failed setup of fast-path EQ " - "(%d), rc = 0x%x\n", fcp_eqidx, rc); + "(%d), rc = 0x%x\n", fcp_eqidx, + (uint32_t)rc); goto out_destroy_hba_eq; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, @@ -7455,7 +7458,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0527 Failed setup of fast-path FCP " - "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); + "CQ (%d), rc = 0x%x\n", fcp_cqidx, + (uint32_t)rc); goto out_destroy_fcp_cq; } @@ -7495,7 +7499,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0535 Failed setup of fast-path FCP " - "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); + "WQ (%d), rc = 0x%x\n", fcp_wqidx, + (uint32_t)rc); goto out_destroy_fcp_wq; } @@ -7528,7 +7533,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0529 Failed setup of slow-path mailbox CQ: " - "rc = 0x%x\n", rc); + "rc = 0x%x\n", (uint32_t)rc); goto out_destroy_fcp_wq; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, @@ -7548,7 +7553,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0531 Failed setup of slow-path ELS CQ: " - "rc = 0x%x\n", rc); + "rc = 0x%x\n", (uint32_t)rc); goto out_destroy_mbx_cq; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, @@ -7592,7 +7597,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0537 Failed setup of slow-path ELS WQ: " - "rc = 0x%x\n", rc); + "rc = 0x%x\n", (uint32_t)rc); goto out_destroy_mbx_wq; } @@ -7624,7 +7629,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0541 Failed 
setup of Receive Queue: " - "rc = 0x%x\n", rc); + "rc = 0x%x\n", (uint32_t)rc); goto out_destroy_fcp_wq; } @@ -8815,7 +8820,8 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) /* Assign MSI-X vectors to interrupt handlers */ for (index = 0; index < vectors; index++) { memset(&phba->sli4_hba.handler_name[index], 0, 16); - sprintf((char *)&phba->sli4_hba.handler_name[index], + snprintf((char *)&phba->sli4_hba.handler_name[index], + LPFC_SLI4_HANDLER_NAME_SZ, LPFC_DRIVER_HANDLER_NAME"%d", index); phba->sli4_hba.fcp_eq_hdl[index].idx = index; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index c342f6afd747..5cc1103d811e 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1031,6 +1031,8 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); + if (!prsp) + goto out; lp = (uint32_t *) prsp->virt; sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 7185aac24324..2c7057fa2adb 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -8107,7 +8107,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, * * Return: index into SLI4 fast-path FCP queue index. **/ -static inline uint32_t +static inline int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) { struct lpfc_vector_map_info *cpup; @@ -12837,7 +12837,7 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) * memory this function will return -ENOMEM. If the queue create mailbox command * fails this function will return -ENXIO. **/ -uint32_t +int lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq) { struct lpfc_mbx_modify_eq_delay *eq_delay; @@ -12923,7 +12923,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq) * memory this function will return -ENOMEM. If the queue create mailbox command * fails this function will return -ENXIO. **/ -uint32_t +int lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) { struct lpfc_mbx_eq_create *eq_create; @@ -13045,7 +13045,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) * memory this function will return -ENOMEM. If the queue create mailbox command * fails this function will return -ENXIO. **/ -uint32_t +int lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_queue *eq, uint32_t type, uint32_t subtype) { @@ -13386,7 +13386,7 @@ out: * memory this function will return -ENOMEM. If the queue create mailbox command * fails this function will return -ENXIO. **/ -uint32_t +int lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, struct lpfc_queue *cq, uint32_t subtype) { @@ -13622,7 +13622,7 @@ lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno) * memory this function will return -ENOMEM. If the queue create mailbox command * fails this function will return -ENXIO. **/ -uint32_t +int lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) { @@ -13887,7 +13887,7 @@ out: * On success this function will return a zero. If the queue destroy mailbox * command fails this function will return -ENXIO. 
**/ -uint32_t +int lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) { LPFC_MBOXQ_t *mbox; @@ -13943,7 +13943,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) * On success this function will return a zero. If the queue destroy mailbox * command fails this function will return -ENXIO. **/ -uint32_t +int lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) { LPFC_MBOXQ_t *mbox; @@ -13997,7 +13997,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) * On success this function will return a zero. If the queue destroy mailbox * command fails this function will return -ENXIO. **/ -uint32_t +int lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) { LPFC_MBOXQ_t *mbox; @@ -14051,7 +14051,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) * On success this function will return a zero. If the queue destroy mailbox * command fails this function will return -ENXIO. **/ -uint32_t +int lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) { LPFC_MBOXQ_t *mbox; @@ -14104,7 +14104,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) * On success this function will return a zero. If the queue destroy mailbox * command fails this function will return -ENXIO. **/ -uint32_t +int lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, struct lpfc_queue *drq) { @@ -15011,7 +15011,7 @@ uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *phba, uint16_t xri) { - int i; + uint16_t i; for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { if (xri == phba->sli4_hba.xri_ids[i]) @@ -16948,7 +16948,7 @@ lpfc_drain_txq(struct lpfc_hba *phba) char *fail_msg = NULL; struct lpfc_sglq *sglq; union lpfc_wqe wqe; - int txq_cnt = 0; + uint32_t txq_cnt = 0; spin_lock_irqsave(&pring->ring_lock, iflags); list_for_each_entry(piocbq, &pring->txq, list) { diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 7f50aa04d66a..22ceb2b05ba1 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -670,22 +670,22 @@ void lpfc_sli4_hba_reset(struct lpfc_hba *); struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, uint32_t); void lpfc_sli4_queue_free(struct lpfc_queue *); -uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t); -uint32_t lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t); -uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, +int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t); +int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t); +int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, uint32_t, uint32_t); int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, uint32_t); -uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *, +int lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, uint32_t); -uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *, +int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, struct lpfc_queue *, uint32_t); void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int); -uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); -uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); -uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *); -uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *); -uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *, +int lpfc_eq_destroy(struct lpfc_hba *, 
struct lpfc_queue *); +int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); +int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *); +int lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *); +int lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *); int lpfc_sli4_queue_setup(struct lpfc_hba *); void lpfc_sli4_queue_unset(struct lpfc_hba *); -- cgit From eb01656688edd686a80c89281043fe6f2b2af6ff Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 3 Sep 2014 12:58:06 -0400 Subject: lpfc: fix low priority issues from fortify source code scan Fixed Low priority issues from lpfc given by fortify source code scan. Signed-off-by: James Smart Signed-off-by: Dick Kennedy Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_attr.c | 2 +- drivers/scsi/lpfc/lpfc_bsg.c | 5 ----- drivers/scsi/lpfc/lpfc_ct.c | 14 ++++++++------ drivers/scsi/lpfc/lpfc_debugfs.c | 4 ++-- drivers/scsi/lpfc/lpfc_els.c | 16 ++++------------ drivers/scsi/lpfc/lpfc_hbadisc.c | 10 +--------- drivers/scsi/lpfc/lpfc_init.c | 2 -- drivers/scsi/lpfc/lpfc_sli.c | 21 +++++++++------------ 8 files changed, 25 insertions(+), 49 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 6eed9e76a166..2f9b96826ac0 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -3385,7 +3385,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr, if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1)) return -EINVAL; - strcpy(bucket_data, buf); + strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN); str_ptr = &bucket_data[0]; /* Ignore this token - this is command token */ token = strsep(&str_ptr, "\t "); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index a58bffb3f68f..a7bf359aa0c6 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -656,7 +656,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) struct lpfc_nodelist *ndlp = rdata->pnode; uint32_t elscmd; uint32_t cmdsize; - uint32_t rspsize; struct lpfc_iocbq *cmdiocbq; uint16_t rpi = 0; struct bsg_job_data *dd_data; @@ -687,7 +686,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) elscmd = job->request->rqst_data.r_els.els_code; cmdsize = job->request_payload.payload_len; - rspsize = job->reply_payload.payload_len; if (!lpfc_nlp_get(ndlp)) { rc = -ENODEV; @@ -2251,7 +2249,6 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job) i = 0; while (phba->link_state != LPFC_LINK_DOWN) { if (i++ > timeout) { - rc = -ETIMEDOUT; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3140 Timeout waiting for link to " "diagnostic mode_end, timeout:%d ms\n", @@ -2291,7 +2288,6 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job) LPFC_MBOXQ_t *pmboxq; struct sli4_link_diag *link_diag_test_cmd; uint32_t req_len, alloc_len; - uint32_t timeout; struct lpfc_mbx_run_link_diag_test *run_link_diag_test; union lpfc_sli4_cfg_shdr *shdr; uint32_t shdr_status, shdr_add_status; @@ -2342,7 +2338,6 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job) link_diag_test_cmd = (struct sli4_link_diag *) job->request->rqst_data.h_vendor.vendor_cmd; - timeout = link_diag_test_cmd->timeout * 100; rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index da61d8dc0449..61a32cd23f79 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1439,7 +1439,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) /* #2 HBA attribute entry */ ae = 
(ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); ae->ad.bits.AttrType = be16_to_cpu(MANUFACTURER); - strcpy(ae->un.Manufacturer, "Emulex Corporation"); + strncpy(ae->un.Manufacturer, "Emulex Corporation", 64); len = strlen(ae->un.Manufacturer); len += (len & 3) ? (4 - (len & 3)) : 4; ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); @@ -1449,7 +1449,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) /* #3 HBA attribute entry */ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); ae->ad.bits.AttrType = be16_to_cpu(SERIAL_NUMBER); - strcpy(ae->un.SerialNumber, phba->SerialNumber); + strncpy(ae->un.SerialNumber, phba->SerialNumber, 64); len = strlen(ae->un.SerialNumber); len += (len & 3) ? (4 - (len & 3)) : 4; ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); @@ -1459,7 +1459,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) /* #4 HBA attribute entry */ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); ae->ad.bits.AttrType = be16_to_cpu(MODEL); - strcpy(ae->un.Model, phba->ModelName); + strncpy(ae->un.Model, phba->ModelName, 256); len = strlen(ae->un.Model); len += (len & 3) ? (4 - (len & 3)) : 4; ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); @@ -1469,7 +1469,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) /* #5 HBA attribute entry */ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); ae->ad.bits.AttrType = be16_to_cpu(MODEL_DESCRIPTION); - strcpy(ae->un.ModelDescription, phba->ModelDesc); + strncpy(ae->un.ModelDescription, phba->ModelDesc, 256); len = strlen(ae->un.ModelDescription); len += (len & 3) ? (4 - (len & 3)) : 4; ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); @@ -1500,7 +1500,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) /* #7 HBA attribute entry */ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); ae->ad.bits.AttrType = be16_to_cpu(DRIVER_VERSION); - strcpy(ae->un.DriverVersion, lpfc_release_version); + strncpy(ae->un.DriverVersion, + lpfc_release_version, 256); len = strlen(ae->un.DriverVersion); len += (len & 3) ? (4 - (len & 3)) : 4; ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); @@ -1510,7 +1511,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) /* #8 HBA attribute entry */ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); ae->ad.bits.AttrType = be16_to_cpu(OPTION_ROM_VERSION); - strcpy(ae->un.OptionROMVersion, phba->OptionROMVersion); + strncpy(ae->un.OptionROMVersion, + phba->OptionROMVersion, 256); len = strlen(ae->un.OptionROMVersion); len += (len & 3) ? 
(4 - (len & 3)) : 4; ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index b0aedce3f54b..786a2aff7b59 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -269,7 +269,7 @@ static int lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) { int len = 0; - int cnt, i, j, found, posted, low; + int i, j, found, posted, low; uint32_t phys, raw_index, getidx; struct lpfc_hbq_init *hip; struct hbq_s *hbqs; @@ -279,7 +279,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) if (phba->sli_rev != 3) return 0; - cnt = LPFC_HBQINFO_SIZE; + spin_lock_irq(&phba->hbalock); /* toggle between multiple hbqs, if any */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index c7e508085413..4c25485aa934 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1829,7 +1829,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, IOCB_t *irsp; struct lpfc_nodelist *ndlp; struct lpfc_dmabuf *prsp; - int disc, rc, did, type; + int disc, rc; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->context_un.rsp_iocb = rspiocb; @@ -1874,10 +1874,6 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } - /* ndlp could be freed in DSM, save these values now */ - type = ndlp->nlp_type; - did = ndlp->nlp_DID; - if (irsp->ulpStatus) { /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { @@ -2270,8 +2266,6 @@ lpfc_adisc_done(struct lpfc_vport *vport) void lpfc_more_adisc(struct lpfc_vport *vport) { - int sentadisc; - if (vport->num_disc_nodes) vport->num_disc_nodes--; /* Continue discovery with ADISCs to go */ @@ -2284,7 +2278,7 @@ lpfc_more_adisc(struct lpfc_vport *vport) if (vport->fc_flag & FC_NLP_MORE) { lpfc_set_disctmo(vport); /* go thru NPR nodes and issue any remaining ELS ADISCs */ - sentadisc = lpfc_els_disc_adisc(vport); + lpfc_els_disc_adisc(vport); } if (!vport->num_disc_nodes) lpfc_adisc_done(vport); @@ -3028,10 +3022,9 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) { struct lpfc_vport *vport = ndlp->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - uint32_t cmd, did, retry; + uint32_t cmd, retry; spin_lock_irq(shost->host_lock); - did = ndlp->nlp_DID; cmd = ndlp->nlp_last_elscmd; ndlp->nlp_last_elscmd = 0; @@ -5289,10 +5282,9 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, IOCB_t *icmd; RNID *rn; struct ls_rjt stat; - uint32_t cmd, did; + uint32_t cmd; icmd = &cmdiocb->iocb; - did = icmd->un.elsreq64.remoteID; pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; lp = (uint32_t *) pcmd->virt; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 310507dede2a..5452f1f4220e 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -5031,7 +5031,6 @@ lpfc_disc_start(struct lpfc_vport *vport) struct lpfc_hba *phba = vport->phba; uint32_t num_sent; uint32_t clear_la_pending; - int did_changed; if (!lpfc_is_link_up(phba)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, @@ -5050,11 +5049,6 @@ lpfc_disc_start(struct lpfc_vport *vport) lpfc_set_disctmo(vport); - if (vport->fc_prevDID == vport->fc_myDID) - did_changed = 0; - else - did_changed = 1; - vport->fc_prevDID = vport->fc_myDID; vport->num_disc_nodes = 0; @@ -6343,7 +6337,7 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba, uint8_t *buff, uint32_t size) { - uint32_t 
offset = 0, rec_length; + uint32_t offset = 0; uint8_t *rec_ptr; /* @@ -6370,8 +6364,6 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba, } offset += 4; - rec_length = buff[offset + 1]; - /* Read FCoE param record */ rec_ptr = lpfc_get_rec_conf23(&buff[offset], size - offset, FCOE_PARAM_TYPE); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 0d9230486d6e..0b2c53af85c7 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -6719,7 +6719,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) struct lpfc_mbx_get_func_cfg *get_func_cfg; struct lpfc_rsrc_desc_fcfcoe *desc; char *pdesc_0; - uint32_t desc_count; int length, i, rc = 0, rc2; pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -6850,7 +6849,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) /* search for fc_fcoe resrouce descriptor */ get_func_cfg = &pmb->u.mqe.un.get_func_cfg; - desc_count = get_func_cfg->func_cfg.rsrc_desc_count; pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 2c7057fa2adb..cea89c6ce1d6 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -187,7 +187,6 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) { struct lpfc_mqe *temp_mqe; struct lpfc_register doorbell; - uint32_t host_index; /* sanity check on queue memory */ if (unlikely(!q)) @@ -202,7 +201,6 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) q->phba->mbox = (MAILBOX_t *)temp_mqe; /* Update the host index before invoking device */ - host_index = q->host_index; q->host_index = ((q->host_index + 1) % q->entry_count); /* Ring Doorbell */ @@ -2435,11 +2433,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, IOCB_t * irsp; WORD5 * w5p; uint32_t Rctl, Type; - uint32_t match; struct lpfc_iocbq *iocbq; struct lpfc_dmabuf *dmzbuf; - match = 0; irsp = &(saveq->iocb); if (irsp->ulpCommand == CMD_ASYNC_STATUS) { @@ -2865,7 +2861,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) void lpfc_poll_eratt(unsigned long ptr) { struct lpfc_hba *phba; - uint32_t eratt = 0, rem; + uint32_t eratt = 0; uint64_t sli_intr, cnt; phba = (struct lpfc_hba *)ptr; @@ -2880,7 +2876,7 @@ void lpfc_poll_eratt(unsigned long ptr) cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); /* 64-bit integer division not supporte on 32-bit x86 - use do_div */ - rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL); + do_div(cnt, LPFC_ERATT_POLL_INTERVAL); phba->sli.slistat.sli_ips = cnt; phba->sli.slistat.sli_prev_intr = sli_intr; @@ -5953,9 +5949,6 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, curr_blks++; } - /* Calculate the total requested length of the dma memory. */ - req_len = curr_blks * sizeof(uint16_t); - /* * Calculate the size of an embedded mailbox. The uint32_t * accounts for extents-specific word. @@ -6766,13 +6759,16 @@ void lpfc_mbox_timeout_handler(struct lpfc_hba *phba) { LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; - MAILBOX_t *mb = &pmbox->u.mb; + MAILBOX_t *mb = NULL; + struct lpfc_sli *psli = &phba->sli; /* If the mailbox completed, process the completion and return */ if (lpfc_sli4_process_missed_mbox_completions(phba)) return; + if (pmbox != NULL) + mb = &pmbox->u.mb; /* Check the pmbox pointer first. There is a race condition * between the mbox timeout handler getting executed in the * worklist and the mailbox actually completing. 
When this @@ -8121,7 +8117,6 @@ lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) cpup += cpu; return cpup->channel_id; } - chann = cpu; } chann = atomic_add_return(1, &phba->fcp_qidx); chann = (chann % phba->cfg_fcp_io_channel); @@ -12604,6 +12599,9 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) * Process all the event on FCP fast-path EQ */ while ((eqe = lpfc_sli4_eq_get(fpeq))) { + if (eqe == NULL) + break; + lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx); if (!(++ecount % fpeq->entry_repost)) lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); @@ -14244,7 +14242,6 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba, "2511 POST_SGL mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); - rc = -ENXIO; } return 0; } -- cgit From 8c50d25c0edd8ec05a7e069c23f6ac2e50c898b1 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 3 Sep 2014 12:58:16 -0400 Subject: lpfc: fix for handling unmapped ndlp in target reset handler Fix for handling unmapped ndlp in target reset handler Signed-off-by: James Smart Signed-off-by: Dick Kennedy Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_scsi.c | 8 +++++++- drivers/scsi/lpfc/lpfc_sli.c | 8 +++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 0f6be8560508..b99399fe2548 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -5339,7 +5339,13 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) if (status == FAILED) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0722 Target Reset rport failure: rdata x%p\n", rdata); - return FAILED; + spin_lock_irq(shost->host_lock); + pnode->nlp_flag &= ~NLP_NPR_ADISC; + pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + spin_unlock_irq(shost->host_lock); + lpfc_reset_flush_io_context(vport, tgt_id, lun_id, + LPFC_CTX_TGT); + return FAST_IO_FAIL; } scsi_event.event_type = FC_REG_SCSI_EVENT; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index cea89c6ce1d6..207a43d952fa 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -10132,7 +10132,9 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) { struct lpfc_hba *phba = vport->phba; + struct lpfc_scsi_buf *lpfc_cmd; struct lpfc_iocbq *abtsiocbq; + struct lpfc_nodelist *ndlp; struct lpfc_iocbq *iocbq; IOCB_t *icmd; int sum, i, ret_val; @@ -10187,7 +10189,11 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, if (iocbq->iocb_flag & LPFC_IO_FOF) abtsiocbq->iocb_flag |= LPFC_IO_FOF; - if (lpfc_is_link_up(phba)) + lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); + ndlp = lpfc_cmd->rdata->pnode; + + if (lpfc_is_link_up(phba) && + (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE)) abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; else abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; -- cgit From fd10ccfa8dfe740f61471b212c200fbdb5de82f5 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 3 Sep 2014 12:58:23 -0400 Subject: lpfc: update lpfc version to driver version 10.4.8000.0 Update lpfc version to driver version 10.4.8000.0 Signed-off-by: James Smart Signed-off-by: Dick Kennedy Signed-off-by: Christoph Hellwig --- drivers/scsi/lpfc/lpfc_version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 41675c1193e7..89413add2252 100644 --- 
a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "10.2.8001.0." +#define LPFC_DRIVER_VERSION "10.4.8000.0." #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ -- cgit From 5565461e30c15525c431814dd612118a78d05992 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Tue, 2 Sep 2014 14:34:58 -0400 Subject: bnx2fc: fix incorrect DMA memory mapping in bnx2fc_unmap_sg_list() This patch is based on a problem and solution from Maurizio Lombardi where bnx2fc isn't consistent in which device struct we using for DMA map and unmap operations. Make them consistent by using dma_sg_unmap in bnx2fc_unmap_sg_list like bnx2fc_map_sg. Reviewed-by: Eddie Wai Signed-off-by: Chad Dupuis Signed-off-by: Christoph Hellwig --- drivers/scsi/bnx2fc/bnx2fc_io.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 4c5891e66038..0679782d9d15 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1654,6 +1654,10 @@ static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req) u64 addr; int i; + /* + * Use dma_map_sg directly to ensure we're using the correct + * dev struct off of pcidev. + */ sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc), scsi_sg_count(sc), sc->sc_data_direction); scsi_for_each_sg(sc, sg, sg_count, i) { @@ -1703,9 +1707,16 @@ static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req) static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req) { struct scsi_cmnd *sc = io_req->sc_cmd; + struct bnx2fc_interface *interface = io_req->port->priv; + struct bnx2fc_hba *hba = interface->hba; - if (io_req->bd_tbl->bd_valid && sc) { - scsi_dma_unmap(sc); + /* + * Use dma_unmap_sg directly to ensure we're using the correct + * dev struct off of pcidev. + */ + if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) { + dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc), + scsi_sg_count(sc), sc->sc_data_direction); io_req->bd_tbl->bd_valid = 0; } } -- cgit From 7c160fac54d81999e1a6df288d77b156705b36f9 Mon Sep 17 00:00:00 2001 From: Tej Parkash Date: Mon, 19 May 2014 07:32:13 -0400 Subject: bnx2i: Make boot_nic entry visible in the sysfs session objects Signed-off-by: Tej Parkash Signed-off-by: Vikas Chaudhary Reviewed-by: Mike Christie Acked-by: Eddie Wai Signed-off-by: Christoph Hellwig --- drivers/scsi/bnx2i/bnx2i_iscsi.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 40e22497d249..7a36388822aa 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -2235,6 +2235,9 @@ static umode_t bnx2i_attr_is_visible(int param_type, int param) case ISCSI_PARAM_TGT_RESET_TMO: case ISCSI_PARAM_IFACE_NAME: case ISCSI_PARAM_INITIATOR_NAME: + case ISCSI_PARAM_BOOT_ROOT: + case ISCSI_PARAM_BOOT_NIC: + case ISCSI_PARAM_BOOT_TARGET: return S_IRUGO; default: return 0; -- cgit From 4e1f20ae4f92706518d6c18b13da8c2bc28339c7 Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:16 +0530 Subject: mpt2sas: Added driver module parameter max_msix_vectors Added driver module parameter max_msix_vectors. Using this module parameter the maximum number of MSI-X vectors could be set. 
The number of MSI-X vectors used would be the minimum of MSI-X vectors supported by the HBA, the number of CPU cores and the value set to max_msix_vectors module parameters. Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt2sas/mpt2sas_base.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index cf51b48dd7d9..b1726280595a 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -80,6 +80,10 @@ static int msix_disable = -1; module_param(msix_disable, int, 0); MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); +static int max_msix_vectors = -1; +module_param(max_msix_vectors, int, 0); +MODULE_PARM_DESC(max_msix_vectors, " max msix vectors "); + static int mpt2sas_fwfault_debug; MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault " "and halt firmware - (default=0)"); @@ -1402,6 +1406,16 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc) ioc->reply_queue_count = min_t(int, ioc->cpu_count, ioc->msix_vector_count); + if (max_msix_vectors > 0) { + ioc->reply_queue_count = min_t(int, max_msix_vectors, + ioc->reply_queue_count); + ioc->msix_vector_count = ioc->reply_queue_count; + } + + printk(MPT2SAS_INFO_FMT + "MSI-X vectors supported: %d, no of cores: %d, max_msix_vectors: %d\n", + ioc->name, ioc->msix_vector_count, ioc->cpu_count, max_msix_vectors); + entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), GFP_KERNEL); if (!entries) { -- cgit From ff4637d6d1f2f4fdf014404b5b4b1d1e4adc6283 Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:17 +0530 Subject: mpt2sas: MPI2 Rev Y (2.00.17) and Rev Z (2.00.18) specifications Below is the change set in MPI2 Rev Y specification and in 2.00.17 header files 1) Added SCSIStatusQualifier to SCSI IO Error Reply message. 2) Added ATA Security Freeze Lock to IO Unit Page 1 Flags field. Below is the change set in MPI2 Rev Z specification and in 2.00.19 header files 1) Added reserved fields to IO Unit Page 7 for future use. 2) Added optional functionality to IOCInit Request so that the host may specify a separate base address for each Reply Descriptor Post Queue. IOC support for this is indicated using a new IOCCapabilities bit in the IOCFacts Reply. 3) Added Toolbox Console Text Display Tool The host uses the Console Text Display Tool to send a string to IOC's Console using different console types (eg: UART serial terminal or Ethernet terminal). The copyright in the mpi files is updated for year 2014 Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt2sas/mpi/mpi2.h | 12 ++++-- drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | 29 ++++++++++---- drivers/scsi/mpt2sas/mpi/mpi2_init.h | 8 ++-- drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | 74 ++++++++++++++++++++++++++++++------ drivers/scsi/mpt2sas/mpi/mpi2_raid.h | 8 +++- drivers/scsi/mpt2sas/mpi/mpi2_sas.h | 2 +- drivers/scsi/mpt2sas/mpi/mpi2_tool.h | 44 ++++++++++++++++++++- drivers/scsi/mpt2sas/mpi/mpi2_type.h | 2 +- 8 files changed, 148 insertions(+), 31 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h index 7b14a015c903..088eefa67da8 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2013 LSI Corporation. + * Copyright (c) 2000-2014 LSI Corporation. 
* * * Name: mpi2.h @@ -8,7 +8,7 @@ * scatter/gather formats. * Creation Date: June 21, 2006 * - * mpi2.h Version: 02.00.28 + * mpi2.h Version: 02.00.32 * * Version History * --------------- @@ -78,6 +78,11 @@ * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT. * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT. * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET. + * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. + * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT. * -------------------------------------------------------------------------- */ @@ -103,7 +108,7 @@ #define MPI2_VERSION_02_00 (0x0200) /* versioning for this MPI header set */ -#define MPI2_HEADER_VERSION_UNIT (0x1C) +#define MPI2_HEADER_VERSION_UNIT (0x20) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) @@ -263,6 +268,7 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS #define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF) #define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000) #define MPI2_RPHI_MSIX_INDEX_SHIFT (24) +#define MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) /* MPI v2.5 only */ /* * Defines for the HCBSize and address diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h index 88cb7f828bbd..510ef0dc8d7b 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2013 LSI Corporation. + * Copyright (c) 2000-2014 LSI Corporation. * * * Name: mpi2_cnfg.h * Title: MPI Configuration messages and pages * Creation Date: November 10, 2006 * - * mpi2_cnfg.h Version: 02.00.23 + * mpi2_cnfg.h Version: 02.00.26 * * Version History * --------------- @@ -150,7 +150,13 @@ * Added UEFIVersion field to BIOS Page 1 and defined new * BiosOptions bits. * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER. - * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID. + * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID. + * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as + * obsolete for MPI v2.5 and later. + * Added some defines for 12G SAS speeds. + * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK. + * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to + * match the specification. 
* -------------------------------------------------------------------------- */ @@ -773,6 +779,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 #define MPI2_IOUNITPAGE1_PAGEVERSION (0x04) /* IO Unit Page 1 Flags defines */ +#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000) #define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800) #define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600) #define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9) @@ -844,7 +851,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 { #define MPI2_IOUNITPAGE5_PAGEVERSION (0x00) /* defines for IO Unit Page 5 DmaEngineCapabilities field */ -#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFF00) +#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFFFF0000) #define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16) #define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008) @@ -885,13 +892,17 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { U16 IOCTemperature; /* 0x10 */ U8 IOCTemperatureUnits; /* 0x12 */ U8 IOCSpeed; /* 0x13 */ - U16 BoardTemperature; /* 0x14 */ - U8 BoardTemperatureUnits; /* 0x16 */ - U8 Reserved3; /* 0x17 */ + U16 BoardTemperature; /* 0x14 */ + U8 BoardTemperatureUnits; /* 0x16 */ + U8 Reserved3; /* 0x17 */ + U32 Reserved4; /* 0x18 */ + U32 Reserved5; /* 0x1C */ + U32 Reserved6; /* 0x20 */ + U32 Reserved7; /* 0x24 */ } MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t; -#define MPI2_IOUNITPAGE7_PAGEVERSION (0x02) +#define MPI2_IOUNITPAGE7_PAGEVERSION (0x04) /* defines for IO Unit Page 7 PCIeWidth field */ #define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01) @@ -1801,6 +1812,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 #define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80) #define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90) #define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0) +#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0) #define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F) #define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00) #define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08) @@ -1813,6 +1825,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 #define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80) #define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90) #define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0) +#define MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0) #define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F) #define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08) #define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09) diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h index 9d284dae6553..eea1a16b13ec 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2013 LSI Corporation. + * Copyright (c) 2000-2014 LSI Corporation. * * * Name: mpi2_init.h * Title: MPI SCSI initiator mode messages and structures * Creation Date: June 23, 2006 * - * mpi2_init.h Version: 02.00.14 + * mpi2_init.h Version: 02.00.15 * * Version History * --------------- @@ -37,6 +37,8 @@ * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command * Priority to match SAM-4. * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION. + * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY, + * replacing the Reserved4 field. 
* -------------------------------------------------------------------------- */ @@ -234,7 +236,7 @@ typedef struct _MPI2_SCSI_IO_REPLY U32 SenseCount; /* 0x18 */ U32 ResponseInfo; /* 0x1C */ U16 TaskTag; /* 0x20 */ - U16 Reserved4; /* 0x22 */ + U16 SCSIStatusQualifier; /* 0x22 */ U32 BidirectionalTransferCount; /* 0x24 */ U32 Reserved5; /* 0x28 */ U32 Reserved6; /* 0x2C */ diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h index d159c5f24aab..2c3b0f28576b 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2013 LSI Corporation. + * Copyright (c) 2000-2014 LSI Corporation. * * * Name: mpi2_ioc.h * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages * Creation Date: October 11, 2006 * - * mpi2_ioc.h Version: 02.00.22 + * mpi2_ioc.h Version: 02.00.23 * * Version History * --------------- @@ -121,6 +121,11 @@ * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE. * Added ElapsedSeconds field to * MPI2_EVENT_DATA_IR_OPERATION_STATUS. + * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE + * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY. + * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE. + * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY. + * Added Encrypted Hash Extended Image. * -------------------------------------------------------------------------- */ @@ -177,6 +182,9 @@ typedef struct _MPI2_IOC_INIT_REQUEST #define MPI2_WHOINIT_HOST_DRIVER (0x04) #define MPI2_WHOINIT_MANUFACTURER (0x05) +/* MsgFlags */ +#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01) + /* MsgVersion */ #define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00) #define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8) @@ -189,9 +197,17 @@ typedef struct _MPI2_IOC_INIT_REQUEST #define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF) #define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0) -/* minimum depth for the Reply Descriptor Post Queue */ +/* minimum depth for a Reply Descriptor Post Queue */ #define MPI2_RDPQ_DEPTH_MIN (16) +/* Reply Descriptor Post Queue Array Entry */ +typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY { + U64 RDPQBaseAddress; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ +} MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, +MPI2_POINTER PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, +Mpi2IOCInitRDPQArrayEntry, MPI2_POINTER pMpi2IOCInitRDPQArrayEntry; /* IOCInit Reply message */ typedef struct _MPI2_IOC_INIT_REPLY @@ -307,6 +323,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY /* ProductID field uses MPI2_FW_HEADER_PID_ */ /* IOCCapabilities */ +#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000) #define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000) #define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000) #define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000) @@ -1153,6 +1170,7 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST #define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09) #define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A) #define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) +#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C) #define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0) /* FWDownload TransactionContext Element */ @@ -1379,14 +1397,15 @@ typedef struct _MPI2_EXT_IMAGE_HEADER #define MPI2_EXT_IMAGE_HEADER_SIZE (0x40) /* defines for the ImageType field */ -#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00) -#define MPI2_EXT_IMAGE_TYPE_FW (0x01) -#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03) -#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04) -#define 
MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05) -#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06) -#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07) -#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08) +#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00) +#define MPI2_EXT_IMAGE_TYPE_FW (0x01) +#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03) +#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04) +#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05) +#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06) +#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07) +#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08) +#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09) #define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80) #define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF) #define MPI2_EXT_IMAGE_TYPE_MAX \ @@ -1555,6 +1574,39 @@ typedef struct _MPI2_INIT_IMAGE_FOOTER #define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14) +/* Encrypted Hash Extended Image Data */ + +typedef struct _MPI25_ENCRYPTED_HASH_ENTRY { + U8 HashImageType; /* 0x00 */ + U8 HashAlgorithm; /* 0x01 */ + U8 EncryptionAlgorithm; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U32 Reserved2; /* 0x04 */ + U32 EncryptedHash[1]; /* 0x08 */ +} MPI25_ENCRYPTED_HASH_ENTRY, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_ENTRY, +Mpi25EncryptedHashEntry_t, MPI2_POINTER pMpi25EncryptedHashEntry_t; + +/* values for HashImageType */ +#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00) +#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01) + +/* values for HashAlgorithm */ +#define MPI25_HASH_ALGORITHM_UNUSED (0x00) +#define MPI25_HASH_ALGORITHM_SHA256 (0x01) + +/* values for EncryptionAlgorithm */ +#define MPI25_ENCRYPTION_ALG_UNUSED (0x00) +#define MPI25_ENCRYPTION_ALG_RSA256 (0x01) + +typedef struct _MPI25_ENCRYPTED_HASH_DATA { + U8 ImageVersion; /* 0x00 */ + U8 NumHash; /* 0x01 */ + U16 Reserved1; /* 0x02 */ + U32 Reserved2; /* 0x04 */ + MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /* 0x08 */ +} MPI25_ENCRYPTED_HASH_DATA, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_DATA, +Mpi25EncryptedHashData_t, MPI2_POINTER pMpi25EncryptedHashData_t; + /**************************************************************************** * PowerManagementControl message ****************************************************************************/ diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h index 0d202a2c6db7..7efa58ff0d34 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2013 LSI Corporation. + * Copyright (c) 2000-2014 LSI Corporation. * * * Name: mpi2_raid.h * Title: MPI Integrated RAID messages and structures * Creation Date: April 26, 2007 * - * mpi2_raid.h Version: 02.00.09 + * mpi2_raid.h Version: 02.00.10 * * Version History * --------------- @@ -29,6 +29,7 @@ * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN. * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR. * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define. + * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI. 
* -------------------------------------------------------------------------- */ @@ -45,6 +46,9 @@ * RAID Action messages ****************************************************************************/ +/* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */ +#define MPI25_RAID_ACTION_ADATA_ALLOW_PI (0x80000000) + /* ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */ #define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000) #define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001) diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h index 50b39ccd526a..45b6fa10b803 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2013 LSI Corporation. + * Copyright (c) 2000-2014 LSI Corporation. * * * Name: mpi2_sas.h diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h index 11b2ac4e7c6e..9be03ed46180 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2013 LSI Corporation. + * Copyright (c) 2000-2014 LSI Corporation. * * * Name: mpi2_tool.h * Title: MPI diagnostic tool structures and definitions * Creation Date: March 26, 2007 * - * mpi2_tool.h Version: 02.00.10 + * mpi2_tool.h Version: 02.00.11 * * Version History * --------------- @@ -29,6 +29,7 @@ * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that * it uses MPI Chain SGE as well as MPI Simple SGE. + * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. * -------------------------------------------------------------------------- */ @@ -48,6 +49,7 @@ #define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03) #define MPI2_TOOLBOX_BEACON_TOOL (0x05) #define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06) +#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07) /**************************************************************************** @@ -321,6 +323,44 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY { MPI2_POINTER pMpi2ToolboxDiagnosticCliReply_t; +/**************************************************************************** +* Toolbox Console Text Display Tool +****************************************************************************/ + +/* Toolbox Console Text Display Tool request message */ +typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST { + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 Console; /* 0x0C */ + U8 Flags; /* 0x0D */ + U16 Reserved6; /* 0x0E */ + U8 TextToDisplay[4]; /* 0x10 */ +} MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, +MPI2_POINTER PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, +Mpi2ToolboxTextDisplayRequest_t, +MPI2_POINTER pMpi2ToolboxTextDisplayRequest_t; + +/* defines for the Console field */ +#define MPI2_TOOLBOX_CONSOLE_TYPE_MASK (0xF0) +#define MPI2_TOOLBOX_CONSOLE_TYPE_DEFAULT (0x00) +#define MPI2_TOOLBOX_CONSOLE_TYPE_UART (0x10) +#define MPI2_TOOLBOX_CONSOLE_TYPE_ETHERNET (0x20) + +#define MPI2_TOOLBOX_CONSOLE_NUMBER_MASK (0x0F) + +/* defines for the Flags field */ +#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01) + + + /***************************************************************************** * * Diagnostic Buffer Messages diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_type.h 
b/drivers/scsi/mpt2sas/mpi/mpi2_type.h index 0b128b68a5ea..6b0dcdd02f68 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_type.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_type.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2013 LSI Corporation. + * Copyright (c) 2000-2014 LSI Corporation. * * * Name: mpi2_type.h -- cgit From e9edbe310dbb745067f7c5761ba91005fb9d60af Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:18 +0530 Subject: mpt2sas: Copyright in driver sources is updated for year the 2014 Copyright in driver sources is updated for year the 2014. Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt2sas/Kconfig | 2 +- drivers/scsi/mpt2sas/mpt2sas_base.c | 2 +- drivers/scsi/mpt2sas/mpt2sas_base.h | 2 +- drivers/scsi/mpt2sas/mpt2sas_config.c | 2 +- drivers/scsi/mpt2sas/mpt2sas_ctl.c | 2 +- drivers/scsi/mpt2sas/mpt2sas_ctl.h | 2 +- drivers/scsi/mpt2sas/mpt2sas_debug.h | 2 +- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 2 +- drivers/scsi/mpt2sas/mpt2sas_transport.c | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig index 39f08dd20556..657b45ca04c5 100644 --- a/drivers/scsi/mpt2sas/Kconfig +++ b/drivers/scsi/mpt2sas/Kconfig @@ -2,7 +2,7 @@ # Kernel configuration file for the MPT2SAS # # This code is based on drivers/scsi/mpt2sas/Kconfig -# Copyright (C) 2007-2012 LSI Corporation +# Copyright (C) 2007-2014 LSI Corporation # (mailto:DL-MPTFusionLinux@lsi.com) # This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index b1726280595a..8a78ab767e86 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -3,7 +3,7 @@ * for access to MPT (Message Passing Technology) firmware. * * This code is based on drivers/scsi/mpt2sas/mpt2_base.c - * Copyright (C) 2007-2013 LSI Corporation + * Copyright (C) 2007-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 0ac5815a7f91..89f944ac0028 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -3,7 +3,7 @@ * for access to MPT (Message Passing Technology) firmware. 
* * This code is based on drivers/scsi/mpt2sas/mpt2_base.h - * Copyright (C) 2007-2013 LSI Corporation + * Copyright (C) 2007-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c index 0c47425c73f2..c72a2fff5dbb 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_config.c +++ b/drivers/scsi/mpt2sas/mpt2sas_config.c @@ -2,7 +2,7 @@ * This module provides common API for accessing firmware configuration pages * * This code is based on drivers/scsi/mpt2sas/mpt2_base.c - * Copyright (C) 2007-2013 LSI Corporation + * Copyright (C) 2007-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 62df8f9d4271..ca4e563c01dd 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -3,7 +3,7 @@ * controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c - * Copyright (C) 2007-2013 LSI Corporation + * Copyright (C) 2007-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h index 8b2ac1869dcc..fa0567c96050 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h @@ -3,7 +3,7 @@ * controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h - * Copyright (C) 2007-2013 LSI Corporation + * Copyright (C) 2007-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h index a9021cbd6628..cc57ef31d0fe 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_debug.h +++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h @@ -2,7 +2,7 @@ * Logging Support for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c - * Copyright (C) 2007-2013 LSI Corporation + * Copyright (C) 2007-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index dd461015813f..db71b60e8a87 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2,7 +2,7 @@ * Scsi Host Layer for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c - * Copyright (C) 2007-2013 LSI Corporation + * Copyright (C) 2007-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index 410f4a3e8888..0d1d06488a28 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -2,7 +2,7 @@ * SAS Transport Layer for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c - * Copyright (C) 2007-2013 LSI Corporation + * Copyright (C) 2007-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or -- cgit From bd58ea3c25dbee16857733f3176dba23c34b64e5 Mon Sep 17 00:00:00 2001 
From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:19 +0530 Subject: mpt2sas: Clear PFA Status on SGPIO when PFA Drive is Removed or Replaced Added code to send a SEP message that turns off the Predictive Failure LED when a drive is removed (if Predictive Failure LED was turned on). Added a new flag 'pfa_led_on' per device that tracks the status of Predictive Failure LED. When the drive is removed, this flag is checked and sends the SEP message to turn off the respective Predictive Failure LED. Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt2sas/mpt2sas_base.h | 2 ++ drivers/scsi/mpt2sas/mpt2sas_scsih.c | 70 +++++++++++++++++++++++++++++++----- 2 files changed, 63 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 89f944ac0028..6ce7020afa52 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -355,6 +355,7 @@ struct _internal_cmd { * @slot: number number * @phy: phy identifier provided in sas device page 0 * @responding: used in _scsih_sas_device_mark_responding + * @pfa_led_on: flag for PFA LED status */ struct _sas_device { struct list_head list; @@ -373,6 +374,7 @@ struct _sas_device { u16 slot; u8 phy; u8 responding; + u8 pfa_led_on; }; /** diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index db71b60e8a87..e7303edc3d8e 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -145,7 +145,7 @@ struct sense_info { }; -#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC) +#define MPT2SAS_TURN_ON_PFA_LED (0xFFFC) #define MPT2SAS_PORT_ENABLE_COMPLETE (0xFFFD) #define MPT2SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF) /** @@ -4308,7 +4308,7 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, #endif /** - * _scsih_turn_on_fault_led - illuminate Fault LED + * _scsih_turn_on_pfa_led - illuminate PFA LED * @ioc: per adapter object * @handle: device handle * Context: process @@ -4316,10 +4316,15 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, * Return nothing. */ static void -_scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle) +_scsih_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle) { Mpi2SepReply_t mpi_reply; Mpi2SepRequest_t mpi_request; + struct _sas_device *sas_device; + + sas_device = _scsih_sas_device_find_by_handle(ioc, handle); + if (!sas_device) + return; memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; @@ -4334,6 +4339,47 @@ _scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle) __FILE__, __LINE__, __func__); return; } + sas_device->pfa_led_on = 1; + + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT + "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo))); + return; + } +} + +/** + * _scsih_turn_off_pfa_led - turn off PFA LED + * @ioc: per adapter object + * @sas_device: sas device whose PFA LED has to turned off + * Context: process + * + * Return nothing. 
+ */ +static void +_scsih_turn_off_pfa_led(struct MPT2SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + Mpi2SepReply_t mpi_reply; + Mpi2SepRequest_t mpi_request; + + memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); + mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; + mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS; + mpi_request.SlotStatus = 0; + mpi_request.Slot = cpu_to_le16(sas_device->slot); + mpi_request.DevHandle = 0; + mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle); + mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS; + if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply, + &mpi_request)) != 0) { + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: " @@ -4345,7 +4391,7 @@ _scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle) } /** - * _scsih_send_event_to_turn_on_fault_led - fire delayed event + * _scsih_send_event_to_turn_on_pfa_led - fire delayed event * @ioc: per adapter object * @handle: device handle * Context: interrupt. @@ -4353,14 +4399,14 @@ _scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle) * Return nothing. */ static void -_scsih_send_event_to_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle) +_scsih_send_event_to_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle) { struct fw_event_work *fw_event; fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); if (!fw_event) return; - fw_event->event = MPT2SAS_TURN_ON_FAULT_LED; + fw_event->event = MPT2SAS_TURN_ON_PFA_LED; fw_event->device_handle = handle; fw_event->ioc = ioc; _scsih_fw_event_add(ioc, fw_event); @@ -4404,7 +4450,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle) spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) - _scsih_send_event_to_turn_on_fault_led(ioc, handle); + _scsih_send_event_to_turn_on_pfa_led(ioc, handle); /* insert into event log */ sz = offsetof(Mpi2EventNotificationReply_t, EventData) + @@ -5325,6 +5371,12 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, { struct MPT2SAS_TARGET *sas_target_priv_data; + if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) && + (sas_device->pfa_led_on)) { + _scsih_turn_off_pfa_led(ioc, sas_device); + sas_device->pfa_led_on = 0; + } + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: " "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, sas_device->handle, (unsigned long long) @@ -7441,8 +7493,8 @@ _firmware_event_work(struct work_struct *work) dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete " "from worker thread\n", ioc->name)); break; - case MPT2SAS_TURN_ON_FAULT_LED: - _scsih_turn_on_fault_led(ioc, fw_event->device_handle); + case MPT2SAS_TURN_ON_PFA_LED: + _scsih_turn_on_pfa_led(ioc, fw_event->device_handle); break; case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: _scsih_sas_topology_change_event(ioc, fw_event); -- cgit From 49563e1e4b202436a36409f365b0a96796db5aad Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:20 +0530 Subject: mpt2sas: Bump mpt2sas driver version to 17.100.00.00 Bump mpt2sas driver version to 17.100.00.00 Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. 
Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt2sas/mpt2sas_base.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 6ce7020afa52..e0e4dd48e9dc 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -69,8 +69,8 @@ #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_AUTHOR "LSI Corporation " #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" -#define MPT2SAS_DRIVER_VERSION "16.100.00.00" -#define MPT2SAS_MAJOR_VERSION 16 +#define MPT2SAS_DRIVER_VERSION "17.100.00.00" +#define MPT2SAS_MAJOR_VERSION 17 #define MPT2SAS_MINOR_VERSION 100 #define MPT2SAS_BUILD_VERSION 00 #define MPT2SAS_RELEASE_VERSION 00 -- cgit From daeaa9df92bd742f4e6d4d6039d689277a8e31bd Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:21 +0530 Subject: mpt2sas: Avoid type casting for direct I/O commands A type casting error caused the max volume LBA to be truncated from 64 to 32 bits. The virtual LBA would also get truncated to 32 bits in the case of a 16-byte READ/WRITE command. Rewrite entire function to get rid of code duplication and type casts. Use get/put_unaligned wrappers to extract and replace the LBA field in the MPI request CDB. Signed-off-by: Martin K. Petersen Tested-by: Sreekanth Reddy Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 117 ++++++++++++----------------------- 1 file changed, 40 insertions(+), 77 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index e7303edc3d8e..231ec34d8ea9 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -55,6 +55,8 @@ #include #include +#include + #include "mpt2sas_base.h" MODULE_AUTHOR(MPT2SAS_AUTHOR); @@ -3858,85 +3860,46 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request, u16 smid) { - u32 v_lba, p_lba, stripe_off, stripe_unit, column, io_size; + sector_t v_lba, p_lba, stripe_off, stripe_unit, column, io_size; u32 stripe_sz, stripe_exp; - u8 num_pds, *cdb_ptr, i; - u8 cdb0 = scmd->cmnd[0]; - u64 v_llba; + u8 num_pds, cmd = scmd->cmnd[0]; - /* - * Try Direct I/O to RAID memeber disks - */ - if (cdb0 == READ_16 || cdb0 == READ_10 || - cdb0 == WRITE_16 || cdb0 == WRITE_10) { - cdb_ptr = mpi_request->CDB.CDB32; - - if ((cdb0 < READ_16) || !(cdb_ptr[2] | cdb_ptr[3] | cdb_ptr[4] - | cdb_ptr[5])) { - io_size = scsi_bufflen(scmd) >> - raid_device->block_exponent; - i = (cdb0 < READ_16) ? 
2 : 6; - /* get virtual lba */ - v_lba = be32_to_cpu(*(__be32 *)(&cdb_ptr[i])); - - if (((u64)v_lba + (u64)io_size - 1) <= - (u32)raid_device->max_lba) { - stripe_sz = raid_device->stripe_sz; - stripe_exp = raid_device->stripe_exponent; - stripe_off = v_lba & (stripe_sz - 1); - - /* Check whether IO falls within a stripe */ - if ((stripe_off + io_size) <= stripe_sz) { - num_pds = raid_device->num_pds; - p_lba = v_lba >> stripe_exp; - stripe_unit = p_lba / num_pds; - column = p_lba % num_pds; - p_lba = (stripe_unit << stripe_exp) + - stripe_off; - mpi_request->DevHandle = - cpu_to_le16(raid_device-> - pd_handle[column]); - (*(__be32 *)(&cdb_ptr[i])) = - cpu_to_be32(p_lba); - /* - * WD: To indicate this I/O is directI/O - */ - _scsih_scsi_direct_io_set(ioc, smid, 1); - } - } - } else { - io_size = scsi_bufflen(scmd) >> - raid_device->block_exponent; - /* get virtual lba */ - v_llba = be64_to_cpu(*(__be64 *)(&cdb_ptr[2])); - - if ((v_llba + (u64)io_size - 1) <= - raid_device->max_lba) { - stripe_sz = raid_device->stripe_sz; - stripe_exp = raid_device->stripe_exponent; - stripe_off = (u32) (v_llba & (stripe_sz - 1)); - - /* Check whether IO falls within a stripe */ - if ((stripe_off + io_size) <= stripe_sz) { - num_pds = raid_device->num_pds; - p_lba = (u32)(v_llba >> stripe_exp); - stripe_unit = p_lba / num_pds; - column = p_lba % num_pds; - p_lba = (stripe_unit << stripe_exp) + - stripe_off; - mpi_request->DevHandle = - cpu_to_le16(raid_device-> - pd_handle[column]); - (*(__be64 *)(&cdb_ptr[2])) = - cpu_to_be64((u64)p_lba); - /* - * WD: To indicate this I/O is directI/O - */ - _scsih_scsi_direct_io_set(ioc, smid, 1); - } - } - } - } + if (cmd != READ_10 && cmd != WRITE_10 && + cmd != READ_16 && cmd != WRITE_16) + return; + + if (cmd == READ_10 || cmd == WRITE_10) + v_lba = get_unaligned_be32(&mpi_request->CDB.CDB32[2]); + else + v_lba = get_unaligned_be64(&mpi_request->CDB.CDB32[2]); + + io_size = scsi_bufflen(scmd) >> raid_device->block_exponent; + + if (v_lba + io_size - 1 > raid_device->max_lba) + return; + + stripe_sz = raid_device->stripe_sz; + stripe_exp = raid_device->stripe_exponent; + stripe_off = v_lba & (stripe_sz - 1); + + /* Return unless IO falls within a stripe */ + if (stripe_off + io_size > stripe_sz) + return; + + num_pds = raid_device->num_pds; + p_lba = v_lba >> stripe_exp; + stripe_unit = p_lba / num_pds; + column = p_lba % num_pds; + p_lba = (stripe_unit << stripe_exp) + stripe_off; + mpi_request->DevHandle = cpu_to_le16(raid_device->pd_handle[column]); + + if (cmd == READ_10 || cmd == WRITE_10) + put_unaligned_be32(lower_32_bits(p_lba), + &mpi_request->CDB.CDB32[2]); + else + put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]); + + _scsih_scsi_direct_io_set(ioc, smid, 1); } /** -- cgit From 5fb1bf8aaa832e1e9ca3198de7bbecb8eff7db9c Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:22 +0530 Subject: mpt2sas: Added Reply Descriptor Post Queue (RDPQ) Array support Up to now, Driver allocates a single contiguous block of memory pool for all reply queues and passes down a single address in the ReplyDescriptorPostQueueAddress field of the IOC Init Request Message to the firmware. When firmware receives this address, it will program each of the Reply Descriptor Post Queue registers, as each reply queue has its own register. Thus the firmware, starting from a base address it determines the starting address of the subsequent reply queues through some simple arithmetic calculations. 
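(Editorial illustration, not part of the patch: the per-queue base-address derivation described above can be sketched in a few lines of standalone C. The descriptor size matches Mpi2DefaultReplyDescriptor_t (8 bytes); the queue depth, queue count, and DMA addresses below are made-up values chosen only to show the arithmetic, not what the firmware or driver actually uses.)

#include <stdio.h>
#include <stdint.h>

#define REPLY_DESC_SIZE    8      /* sizeof(Mpi2DefaultReplyDescriptor_t) */
#define REPLY_POST_QDEPTH  128    /* assumed reply post queue depth */
#define NUM_REPLY_QUEUES   16     /* assumed number of MSI-X reply queues */

int main(void)
{
        /* Legacy scheme: the driver passes one contiguous block and the
         * firmware derives queue i's register value from the single
         * starting address by simple arithmetic. */
        uint64_t single_base = 0x100000000ULL;   /* made-up DMA address */
        int i;

        for (i = 0; i < NUM_REPLY_QUEUES; i++) {
                uint64_t qbase = single_base +
                        (uint64_t)i * REPLY_POST_QDEPTH * REPLY_DESC_SIZE;
                printf("legacy queue %2d base = 0x%llx\n",
                       i, (unsigned long long)qbase);
        }

        /* RDPQ array scheme: the driver allocates each queue independently
         * and hands the firmware one address per queue (one array entry
         * each), so no derivation and no large contiguous block is needed.
         * Addresses here are made up. */
        uint64_t rdpq_base[NUM_REPLY_QUEUES];

        for (i = 0; i < NUM_REPLY_QUEUES; i++)
                rdpq_base[i] = 0x200000000ULL + (uint64_t)i * 0x10000ULL;

        for (i = 0; i < NUM_REPLY_QUEUES; i++)
                printf("rdpq   queue %2d base = 0x%llx\n",
                       i, (unsigned long long)rdpq_base[i]);

        return 0;
}

(End of editorial aside; the commit message continues below.)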
The size of this contiguous block of memory pool is directly proportional to number of MSI-X vectors and the HBA queue depth. For example higher MSIX vectors requires larger contiguous block of memory pool. But some of the OS kernels are unable to allocate this larger contiguous block of memory pool. So, the proposal is to allocate memory independently for each Reply Queue and pass down all of the addresses to the firmware. Then the firmware will just take each address and program the value into the correct register. When HBAs with older firmware(i.e. without RDPQ capability) is used with this new driver then the max_msix_vectors value would be set to 8 by default. Change_set in v1: 1. Declared _base_get_ioc_facts() function at the beginning of the mpt2sas_base.c file instead of moving all these functions before mpt2sas_base_map_resources() function a. _base_wait_for_doorbell_int() b. _base_wait_for_doorbell_ack() c. _base_wait_for_doorbell_not_used() d. _base_handshake_req_reply_wait() e. _base_get_ioc_facts() 2. Initially set the consistent DMA mask to 32 bit and then change it to 64 bit mask after allocating RDPQ pools by calling the function _base_change_consistent_dma_mask. This is to ensure that all the upper 32 bits of RDPQ entries's base address to be same. 3. Reduced the redundancy between the RDPQ and non-RDPQ support in these following functions a. _base_release_memory_pools() b. _base_allocate_memory_pools() c. _base_send_ioc_init() d. _base_make_ioc_operational() Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt2sas/mpt2sas_base.c | 240 +++++++++++++++++++++++++++--------- drivers/scsi/mpt2sas/mpt2sas_base.h | 20 ++- 2 files changed, 199 insertions(+), 61 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 8a78ab767e86..fc9dfda72699 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -92,6 +92,9 @@ static int disable_discovery = -1; module_param(disable_discovery, int, 0); MODULE_PARM_DESC(disable_discovery, " disable discovery "); +static int +_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag); + /** * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. 
* @@ -1179,17 +1182,22 @@ static int _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev) { struct sysinfo s; - char *desc = NULL; + u64 consistent_dma_mask; + + if (ioc->dma_mask) + consistent_dma_mask = DMA_BIT_MASK(64); + else + consistent_dma_mask = DMA_BIT_MASK(32); if (sizeof(dma_addr_t) > 4) { const uint64_t required_mask = dma_get_required_mask(&pdev->dev); - if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev, - DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(64))) { + if ((required_mask > DMA_BIT_MASK(32)) && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && + !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) { ioc->base_add_sg_single = &_base_add_sg_single_64; ioc->sge_size = sizeof(Mpi2SGESimple64_t); - desc = "64"; + ioc->dma_mask = 64; goto out; } } @@ -1198,18 +1206,29 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev) && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { ioc->base_add_sg_single = &_base_add_sg_single_32; ioc->sge_size = sizeof(Mpi2SGESimple32_t); - desc = "32"; + ioc->dma_mask = 32; } else return -ENODEV; out: si_meminfo(&s); - printk(MPT2SAS_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, " - "total mem (%ld kB)\n", ioc->name, desc, convert_to_kb(s.totalram)); + printk(MPT2SAS_INFO_FMT + "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", + ioc->name, ioc->dma_mask, convert_to_kb(s.totalram)); return 0; } +static int +_base_change_consistent_dma_mask(struct MPT2SAS_ADAPTER *ioc, + struct pci_dev *pdev) +{ + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + return -ENODEV; + } + return 0; +} /** * _base_check_enable_msix - checks MSIX capabable. 
* @ioc: per adapter object @@ -1406,11 +1425,15 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc) ioc->reply_queue_count = min_t(int, ioc->cpu_count, ioc->msix_vector_count); + if (!ioc->rdpq_array_enable && max_msix_vectors == -1) + max_msix_vectors = 8; + if (max_msix_vectors > 0) { ioc->reply_queue_count = min_t(int, max_msix_vectors, ioc->reply_queue_count); ioc->msix_vector_count = ioc->reply_queue_count; - } + } else if (max_msix_vectors == 0) + goto try_ioapic; printk(MPT2SAS_INFO_FMT "MSI-X vectors supported: %d, no of cores: %d, max_msix_vectors: %d\n", @@ -1453,6 +1476,7 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc) /* failback to io_apic interrupt routing */ try_ioapic: + ioc->reply_queue_count = 1; r = _base_request_irq(ioc, 0, ioc->pdev->irq); return r; @@ -1534,6 +1558,16 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) } _base_mask_interrupts(ioc); + + r = _base_get_ioc_facts(ioc, CAN_SLEEP); + if (r) + goto out_fail; + + if (!ioc->rdpq_array_enable_assigned) { + ioc->rdpq_array_enable = ioc->rdpq_array_capable; + ioc->rdpq_array_enable_assigned = 1; + } + r = _base_enable_msix(ioc); if (r) goto out_fail; @@ -2331,7 +2365,8 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc) static void _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc) { - int i; + int i = 0; + struct reply_post_struct *rps; dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); @@ -2372,15 +2407,25 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc) ioc->reply_free = NULL; } - if (ioc->reply_post_free) { - pci_pool_free(ioc->reply_post_free_dma_pool, - ioc->reply_post_free, ioc->reply_post_free_dma); + if (ioc->reply_post) { + do { + rps = &ioc->reply_post[i]; + if (rps->reply_post_free) { + pci_pool_free( + ioc->reply_post_free_dma_pool, + rps->reply_post_free, + rps->reply_post_free_dma); + dexitprintk(ioc, printk(MPT2SAS_INFO_FMT + "reply_post_free_pool(0x%p): free\n", + ioc->name, rps->reply_post_free)); + rps->reply_post_free = NULL; + } + } while (ioc->rdpq_array_enable && + (++i < ioc->reply_queue_count)); + if (ioc->reply_post_free_dma_pool) pci_pool_destroy(ioc->reply_post_free_dma_pool); - dexitprintk(ioc, printk(MPT2SAS_INFO_FMT - "reply_post_free_pool(0x%p): free\n", ioc->name, - ioc->reply_post_free)); - ioc->reply_post_free = NULL; + kfree(ioc->reply_post); } if (ioc->config_page) { @@ -2523,6 +2568,65 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize, ioc->chains_needed_per_io)); + /* reply post queue, 16 byte align */ + reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(Mpi2DefaultReplyDescriptor_t); + + sz = reply_post_free_sz; + if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable) + sz *= ioc->reply_queue_count; + + ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ? 
+ (ioc->reply_queue_count):1, + sizeof(struct reply_post_struct), GFP_KERNEL); + + if (!ioc->reply_post) { + printk(MPT2SAS_ERR_FMT "reply_post_free pool: kcalloc failed\n", + ioc->name); + goto out; + } + ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool", + ioc->pdev, sz, 16, 0); + if (!ioc->reply_post_free_dma_pool) { + printk(MPT2SAS_ERR_FMT + "reply_post_free pool: pci_pool_create failed\n", + ioc->name); + goto out; + } + i = 0; + do { + ioc->reply_post[i].reply_post_free = + pci_pool_alloc(ioc->reply_post_free_dma_pool, + GFP_KERNEL, + &ioc->reply_post[i].reply_post_free_dma); + if (!ioc->reply_post[i].reply_post_free) { + printk(MPT2SAS_ERR_FMT + "reply_post_free pool: pci_pool_alloc failed\n", + ioc->name); + goto out; + } + memset(ioc->reply_post[i].reply_post_free, 0, sz); + dinitprintk(ioc, printk(MPT2SAS_INFO_FMT + "reply post free pool (0x%p): depth(%d)," + "element_size(%d), pool_size(%d kB)\n", ioc->name, + ioc->reply_post[i].reply_post_free, + ioc->reply_post_queue_depth, 8, sz/1024)); + dinitprintk(ioc, printk(MPT2SAS_INFO_FMT + "reply_post_free_dma = (0x%llx)\n", ioc->name, + (unsigned long long) + ioc->reply_post[i].reply_post_free_dma)); + total_sz += sz; + } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count)); + + if (ioc->dma_mask == 64) { + if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) { + printk(MPT2SAS_WARN_FMT + "no suitable consistent DMA mask for %s\n", + ioc->name, pci_name(ioc->pdev)); + goto out; + } + } + ioc->scsiio_depth = ioc->hba_queue_depth - ioc->hi_priority_depth - ioc->internal_depth; @@ -2734,37 +2838,6 @@ chain_done: "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma)); total_sz += sz; - /* reply post queue, 16 byte align */ - reply_post_free_sz = ioc->reply_post_queue_depth * - sizeof(Mpi2DefaultReplyDescriptor_t); - if (_base_is_controller_msix_enabled(ioc)) - sz = reply_post_free_sz * ioc->reply_queue_count; - else - sz = reply_post_free_sz; - ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool", - ioc->pdev, sz, 16, 0); - if (!ioc->reply_post_free_dma_pool) { - printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_create " - "failed\n", ioc->name); - goto out; - } - ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool , - GFP_KERNEL, &ioc->reply_post_free_dma); - if (!ioc->reply_post_free) { - printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_alloc " - "failed\n", ioc->name); - goto out; - } - memset(ioc->reply_post_free, 0, sz); - dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply post free pool" - "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n", - ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8, - sz/1024)); - dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_post_free_dma = " - "(0x%llx)\n", ioc->name, (unsigned long long) - ioc->reply_post_free_dma)); - total_sz += sz; - ioc->config_page_sz = 512; ioc->config_page = pci_alloc_consistent(ioc->pdev, ioc->config_page_sz, &ioc->config_page_dma); @@ -3436,6 +3509,9 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) ioc->ir_firmware = 1; + if ((facts->IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE)) + ioc->rdpq_array_capable = 1; facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); facts->IOCRequestFrameSize = le16_to_cpu(mpi_reply.IOCRequestFrameSize); @@ -3471,9 +3547,12 @@ 
_base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) { Mpi2IOCInitRequest_t mpi_request; Mpi2IOCInitReply_t mpi_reply; - int r; + int i, r = 0; struct timeval current_time; u16 ioc_status; + u32 reply_post_free_array_sz = 0; + Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL; + dma_addr_t reply_post_free_array_dma; dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); @@ -3502,9 +3581,31 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) cpu_to_le64((u64)ioc->request_dma); mpi_request.ReplyFreeQueueAddress = cpu_to_le64((u64)ioc->reply_free_dma); - mpi_request.ReplyDescriptorPostQueueAddress = - cpu_to_le64((u64)ioc->reply_post_free_dma); + if (ioc->rdpq_array_enable) { + reply_post_free_array_sz = ioc->reply_queue_count * + sizeof(Mpi2IOCInitRDPQArrayEntry); + reply_post_free_array = pci_alloc_consistent(ioc->pdev, + reply_post_free_array_sz, &reply_post_free_array_dma); + if (!reply_post_free_array) { + printk(MPT2SAS_ERR_FMT + "reply_post_free_array: pci_alloc_consistent failed\n", + ioc->name); + r = -ENOMEM; + goto out; + } + memset(reply_post_free_array, 0, reply_post_free_array_sz); + for (i = 0; i < ioc->reply_queue_count; i++) + reply_post_free_array[i].RDPQBaseAddress = + cpu_to_le64( + (u64)ioc->reply_post[i].reply_post_free_dma); + mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE; + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64)reply_post_free_array_dma); + } else { + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma); + } /* This time stamp specifies number of milliseconds * since epoch ~ midnight January 1, 1970. @@ -3532,7 +3633,7 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) if (r != 0) { printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n", ioc->name, __func__, r); - return r; + goto out; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; @@ -3542,7 +3643,12 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) r = -EIO; } - return 0; +out: + if (reply_post_free_array) + pci_free_consistent(ioc->pdev, reply_post_free_array_sz, + reply_post_free_array, + reply_post_free_array_dma); + return r; } /** @@ -4075,7 +4181,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) u8 hide_flag; struct adapter_reply_queue *reply_q; long reply_post_free; - u32 reply_post_free_sz; + u32 reply_post_free_sz, index = 0; dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); @@ -4146,19 +4252,27 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) _base_assign_reply_queues(ioc); /* initialize Reply Post Free Queue */ - reply_post_free = (long)ioc->reply_post_free; reply_post_free_sz = ioc->reply_post_queue_depth * sizeof(Mpi2DefaultReplyDescriptor_t); + reply_post_free = (long)ioc->reply_post[index].reply_post_free; list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { reply_q->reply_post_host_index = 0; reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *) reply_post_free; for (i = 0; i < ioc->reply_post_queue_depth; i++) reply_q->reply_post_free[i].Words = - cpu_to_le64(ULLONG_MAX); + cpu_to_le64(ULLONG_MAX); if (!_base_is_controller_msix_enabled(ioc)) goto skip_init_reply_post_free_queue; - reply_post_free += reply_post_free_sz; + /* + * If RDPQ is enabled, switch to the next allocation. + * Otherwise advance within the contiguous region. 
+ */ + if (ioc->rdpq_array_enable) + reply_post_free = (long) + ioc->reply_post[++index].reply_post_free; + else + reply_post_free += reply_post_free_sz; } skip_init_reply_post_free_queue: @@ -4286,6 +4400,8 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) } } + ioc->rdpq_array_enable_assigned = 0; + ioc->dma_mask = 0; r = mpt2sas_base_map_resources(ioc); if (r) goto out_free_resources; @@ -4647,6 +4763,16 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, r = -EFAULT; goto out; } + + r = _base_get_ioc_facts(ioc, CAN_SLEEP); + if (r) + goto out; + + if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable) + panic("%s: Issue occurred with flashing controller firmware." + "Please reboot the system and ensure that the correct" + " firmware version is running\n", ioc->name); + r = _base_make_ioc_operational(ioc, sleep_flag); if (!r) _base_reset_handler(ioc, MPT2_IOC_DONE_RESET); diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index e0e4dd48e9dc..9bc8e0b915b8 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -636,6 +636,11 @@ struct mpt2sas_port_facts { u16 MaxPostedCmdBuffers; }; +struct reply_post_struct { + Mpi2ReplyDescriptorsUnion_t *reply_post_free; + dma_addr_t reply_post_free_dma; +}; + /** * enum mutex_type - task management mutex type * @TM_MUTEX_OFF: mutex is not required becuase calling function is acquiring it @@ -663,6 +668,7 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc); * @ir_firmware: IR firmware present * @bars: bitmask of BAR's that must be configured * @mask_interrupts: ignore interrupt + * @dma_mask: used to set the consistent dma mask * @fault_reset_work_q_name: fw fault work queue * @fault_reset_work_q: "" * @fault_reset_work: "" @@ -779,8 +785,11 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc); * @reply_free_dma_pool: * @reply_free_host_index: tail index in pool to insert free replys * @reply_post_queue_depth: reply post queue depth - * @reply_post_free: pool for reply post (64bit descriptor) - * @reply_post_free_dma: + * @reply_post_struct: struct for reply_post_free physical & virt address + * @rdpq_array_capable: FW supports multiple reply queue addresses in ioc_init + * @rdpq_array_enable: rdpq_array support is enabled in the driver + * @rdpq_array_enable_assigned: this ensures that rdpq_array_enable flag + * is assigned only ones * @reply_queue_count: number of reply queue's * @reply_queue_list: link list contaning the reply queue info * @reply_post_host_index: head index in the pool where FW completes IO @@ -802,6 +811,7 @@ struct MPT2SAS_ADAPTER { u8 ir_firmware; int bars; u8 mask_interrupts; + int dma_mask; /* fw fault handler */ char fault_reset_work_q_name[20]; @@ -972,8 +982,10 @@ struct MPT2SAS_ADAPTER { /* reply post queue */ u16 reply_post_queue_depth; - Mpi2ReplyDescriptorsUnion_t *reply_post_free; - dma_addr_t reply_post_free_dma; + struct reply_post_struct *reply_post; + u8 rdpq_array_capable; + u8 rdpq_array_enable; + u8 rdpq_array_enable_assigned; struct dma_pool *reply_post_free_dma_pool; u8 reply_queue_count; struct list_head reply_queue_list; -- cgit From a66dd970c7808f0a3453bbc38b39553f6eafd994 Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:23 +0530 Subject: mpt2sas: Get IOC_FACTS information using handshake protocol only after HBA card gets into READY or Operational state. 
Driver initialization fails if driver tries to send IOC facts request message when the IOC is in reset or in a fault state. This patch will make sure that 1.Driver to send IOC facts request message only if HBA is in operational or ready state. 2.If IOC is in fault state, a diagnostic reset would be issued. 3.If IOC is in reset state then driver will wait for 10 seconds to exit out of reset state. If the HBA continues to be in reset state, then the HBA wouldn't be claimed by the driver. Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt2sas/mpt2sas_base.c | 68 +++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index fc9dfda72699..58e45216d1ec 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -95,6 +95,9 @@ MODULE_PARM_DESC(disable_discovery, " disable discovery "); static int _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag); +static int +_base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag); + /** * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. * @@ -3460,6 +3463,64 @@ _base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag) return 0; } +/** + * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL + * @ioc: per adapter object + * @timeout: + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_wait_for_iocstate(struct MPT2SAS_ADAPTER *ioc, int timeout, + int sleep_flag) +{ + u32 ioc_state, doorbell; + int rc; + + dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, + __func__)); + + if (ioc->pci_error_recovery) + return 0; + + doorbell = mpt2sas_base_get_iocstate(ioc, 0); + ioc_state = doorbell & MPI2_IOC_STATE_MASK; + dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: ioc_state(0x%08x)\n", + ioc->name, __func__, ioc_state)); + + switch (ioc_state) { + case MPI2_IOC_STATE_READY: + case MPI2_IOC_STATE_OPERATIONAL: + return 0; + } + + if (doorbell & MPI2_DOORBELL_USED) { + dhsprintk(ioc, printk(MPT2SAS_INFO_FMT + "unexpected doorbell activ!e\n", ioc->name)); + goto issue_diag_reset; + } + + if (ioc_state == MPI2_IOC_STATE_FAULT) { + mpt2sas_base_fault_info(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); + goto issue_diag_reset; + } + + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, + timeout, sleep_flag); + if (ioc_state) { + printk(MPT2SAS_ERR_FMT + "%s: failed going to ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + return -EFAULT; + } + + issue_diag_reset: + rc = _base_diag_reset(ioc, sleep_flag); + return rc; +} + /** * _base_get_ioc_facts - obtain ioc facts reply and save in ioc * @ioc: per adapter object @@ -3478,6 +3539,13 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); + r = _base_wait_for_iocstate(ioc, 10, sleep_flag); + if (r) { + printk(MPT2SAS_ERR_FMT "%s: failed getting to correct state\n", + ioc->name, __func__); + return r; + } + mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t); memset(&mpi_request, 0, mpi_request_sz); -- cgit From 9b2dcba3aa2287cbd08d1b987fec7a0fd8a0ecb1 Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:24 +0530 Subject: mpt2sas: Bump mpt2sas driver version to 18.100.00.00 Bump mpt2sas driver version to 18.100.00.00. 
Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt2sas/mpt2sas_base.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 9bc8e0b915b8..239f169b0673 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -69,8 +69,8 @@ #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_AUTHOR "LSI Corporation " #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" -#define MPT2SAS_DRIVER_VERSION "17.100.00.00" -#define MPT2SAS_MAJOR_VERSION 17 +#define MPT2SAS_DRIVER_VERSION "18.100.00.00" +#define MPT2SAS_MAJOR_VERSION 18 #define MPT2SAS_MINOR_VERSION 100 #define MPT2SAS_BUILD_VERSION 00 #define MPT2SAS_RELEASE_VERSION 00 -- cgit From 861ff736c8d8ae574dcc1ef81e5da84ff85e70c9 Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:25 +0530 Subject: mpt3sas: MPI2.5 Rev G (2.5.2) specifications Below is the change set in MPI2.5 Rev G specification and 2.00.31 header files 1) Added SCSIStatusQualifier to SCSI IO Error Reply message. 2) Added ATA Security Freeze Lock to IO Unit Page 1 Flags field. 3) Added Allow Protection Information bit for IR Volume Create. Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt3sas/mpi/mpi2.h | 6 ++++-- drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 8 ++++++-- drivers/scsi/mpt3sas/mpi/mpi2_init.h | 6 ++++-- drivers/scsi/mpt3sas/mpi/mpi2_raid.h | 6 +++++- 4 files changed, 19 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h index 20da8f907c00..dc143dda422a 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2.h @@ -8,7 +8,7 @@ * scatter/gather formats. * Creation Date: June 21, 2006 * - * mpi2.h Version: 02.00.29 + * mpi2.h Version: 02.00.31 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -86,6 +86,8 @@ * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT. * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT. * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET. + * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. + * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. * -------------------------------------------------------------------------- */ @@ -119,7 +121,7 @@ #define MPI2_VERSION_02_05 (0x0205) /*Unit and Dev versioning for this MPI header set */ -#define MPI2_HEADER_VERSION_UNIT (0x1D) +#define MPI2_HEADER_VERSION_UNIT (0x1F) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h index 889aa7067899..5b0e5c1778fb 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h @@ -6,7 +6,7 @@ * Title: MPI Configuration messages and pages * Creation Date: November 10, 2006 * - * mpi2_cnfg.h Version: 02.00.24 + * mpi2_cnfg.h Version: 02.00.25 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -160,6 +160,9 @@ * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as * obsolete for MPI v2.5 and later. * Added some defines for 12G SAS speeds. + * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK. 
+ * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to + * match the specification. * -------------------------------------------------------------------------- */ @@ -792,6 +795,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 { #define MPI2_IOUNITPAGE1_PAGEVERSION (0x04) /*IO Unit Page 1 Flags defines */ +#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000) #define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000) #define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000) #define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800) @@ -870,7 +874,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 { #define MPI2_IOUNITPAGE5_PAGEVERSION (0x00) /*defines for IO Unit Page 5 DmaEngineCapabilities field */ -#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFF00) +#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFFFF0000) #define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16) #define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h index f7928bf66478..b3e33313d7a3 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h @@ -6,7 +6,7 @@ * Title: MPI SCSI initiator mode messages and structures * Creation Date: June 23, 2006 * - * mpi2_init.h Version: 02.00.14 + * mpi2_init.h Version: 02.00.15 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -44,6 +44,8 @@ * Priority to match SAM-4. * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY. * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION. + * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY, + * replacing the Reserved4 field. * -------------------------------------------------------------------------- */ @@ -347,7 +349,7 @@ typedef struct _MPI2_SCSI_IO_REPLY { U32 SenseCount; /*0x18 */ U32 ResponseInfo; /*0x1C */ U16 TaskTag; /*0x20 */ - U16 Reserved4; /*0x22 */ + U16 SCSIStatusQualifier; /* 0x22 */ U32 BidirectionalTransferCount; /*0x24 */ U32 EEDPErrorOffset; /*0x28 *//*MPI 2.5 only; Reserved in MPI 2.0*/ U32 Reserved6; /*0x2C */ diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h index 71765236afef..fbe3aae102b0 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h @@ -6,7 +6,7 @@ * Title: MPI Integrated RAID messages and structures * Creation Date: April 26, 2007 * - * mpi2_raid.h Version: 02.00.09 + * mpi2_raid.h Version: 02.00.10 * * Version History * --------------- @@ -30,6 +30,7 @@ * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN. * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR. * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define. + * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI. 
* -------------------------------------------------------------------------- */ @@ -46,6 +47,9 @@ * RAID Action messages ****************************************************************************/ +/* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */ +#define MPI25_RAID_ACTION_ADATA_ALLOW_PI (0x80000000) + /*ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */ #define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000) #define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001) -- cgit From 0f624c391ecbf18e69b20d681f7e3c52b4ef02c1 Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:26 +0530 Subject: mpt3sas: Clear PFA Status on SGPIO when PFA Drive is Removed or Replaced Added code to send an SEP message that turns off the Predictive Failure LED when a drive is removed (if Predictive Failure LED was turned on). Added a new flag 'pfa_led_on' per device that tracks the status of Predictive Failure LED. When the drive is removed, this flag is checked and sends an SEP message to turn off the respective Predictive Failure LED. Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt3sas/mpt3sas_base.h | 5 ++- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 67 ++++++++++++++++++++++++++++++------ 2 files changed, 61 insertions(+), 11 deletions(-) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 9b90a6fef706..184826c20874 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -272,8 +272,10 @@ struct _internal_cmd { * @channel: target channel * @slot: number number * @phy: phy identifier provided in sas device page 0 - * @fast_path: fast path feature enable bit * @responding: used in _scsih_sas_device_mark_responding + * @fast_path: fast path feature enable bit + * @pfa_led_on: flag for PFA LED status + * */ struct _sas_device { struct list_head list; @@ -293,6 +295,7 @@ struct _sas_device { u8 phy; u8 responding; u8 fast_path; + u8 pfa_led_on; }; /** diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 135f12c20ecf..77a5bfa100f8 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -159,7 +159,7 @@ struct sense_info { }; #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB) -#define MPT3SAS_TURN_ON_FAULT_LED (0xFFFC) +#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC) #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD) #define MPT3SAS_ABRT_TASK_SET (0xFFFE) #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF) @@ -3885,7 +3885,7 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, #endif /** - * _scsih_turn_on_fault_led - illuminate Fault LED + * _scsih_turn_on_pfa_led - illuminate PFA LED * @ioc: per adapter object * @handle: device handle * Context: process @@ -3893,10 +3893,15 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, * Return nothing. 
*/ static void -_scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) +_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) { Mpi2SepReply_t mpi_reply; Mpi2SepRequest_t mpi_request; + struct _sas_device *sas_device; + + sas_device = _scsih_sas_device_find_by_handle(ioc, handle); + if (!sas_device) + return; memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; @@ -3911,6 +3916,7 @@ _scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) __FILE__, __LINE__, __func__); return; } + sas_device->pfa_led_on = 1; if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { dewtprintk(ioc, pr_info(MPT3SAS_FMT @@ -3920,9 +3926,46 @@ _scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) return; } } +/** + * _scsih_turn_off_pfa_led - turn off Fault LED + * @ioc: per adapter object + * @sas_device: sas device whose PFA LED has to turned off + * Context: process + * + * Return nothing. + */ +static void +_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + Mpi2SepReply_t mpi_reply; + Mpi2SepRequest_t mpi_request; + memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); + mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; + mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS; + mpi_request.SlotStatus = 0; + mpi_request.Slot = cpu_to_le16(sas_device->slot); + mpi_request.DevHandle = 0; + mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle); + mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS; + if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply, + &mpi_request)) != 0) { + printk(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { + dewtprintk(ioc, printk(MPT3SAS_FMT + "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo))); + return; + } +} /** - * _scsih_send_event_to_turn_on_fault_led - fire delayed event + * _scsih_send_event_to_turn_on_pfa_led - fire delayed event * @ioc: per adapter object * @handle: device handle * Context: interrupt. @@ -3930,14 +3973,14 @@ _scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) * Return nothing. 
*/ static void -_scsih_send_event_to_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) +_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) { struct fw_event_work *fw_event; fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); if (!fw_event) return; - fw_event->event = MPT3SAS_TURN_ON_FAULT_LED; + fw_event->event = MPT3SAS_TURN_ON_PFA_LED; fw_event->device_handle = handle; fw_event->ioc = ioc; _scsih_fw_event_add(ioc, fw_event); @@ -3981,7 +4024,7 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle) spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) - _scsih_send_event_to_turn_on_fault_led(ioc, handle); + _scsih_send_event_to_turn_on_pfa_led(ioc, handle); /* insert into event log */ sz = offsetof(Mpi2EventNotificationReply_t, EventData) + @@ -4911,7 +4954,11 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, { struct MPT3SAS_TARGET *sas_target_priv_data; - + if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) && + (sas_device->pfa_led_on)) { + _scsih_turn_off_pfa_led(ioc, sas_device); + sas_device->pfa_led_on = 0; + } dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, @@ -7065,8 +7112,8 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) "port enable: complete from worker thread\n", ioc->name)); break; - case MPT3SAS_TURN_ON_FAULT_LED: - _scsih_turn_on_fault_led(ioc, fw_event->device_handle); + case MPT3SAS_TURN_ON_PFA_LED: + _scsih_turn_on_pfa_led(ioc, fw_event->device_handle); break; case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: _scsih_sas_topology_change_event(ioc, fw_event); -- cgit From bd0a791ce281062f29ac984dcb64c960e24d2b0d Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:27 +0530 Subject: mpt3sas: Bump mpt3sas driver version to 03.100.00.00 Bump mpt3sas driver version to 03.100.00.00. Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt3sas/mpt3sas_base.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 184826c20874..575f1bcdd51c 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -70,8 +70,8 @@ #define MPT3SAS_DRIVER_NAME "mpt3sas" #define MPT3SAS_AUTHOR "LSI Corporation " #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" -#define MPT3SAS_DRIVER_VERSION "02.100.00.00" -#define MPT3SAS_MAJOR_VERSION 2 +#define MPT3SAS_DRIVER_VERSION "03.100.00.00" +#define MPT3SAS_MAJOR_VERSION 3 #define MPT3SAS_MINOR_VERSION 100 #define MPT3SAS_BUILD_VERSION 0 #define MPT3SAS_RELEASE_VERSION 00 -- cgit From 4c8bab4d891fac7b44a8e26ec043c42bc910b391 Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:28 +0530 Subject: mpt3sas: MPI2.5 Rev H (2.5.3) specifications Below is the change set in MPI2.5 Rev H specification and 2.00.32 header files 1) Added reserved fields to IO Unit Page 7 for future use. 2) Added optional functionality to IOCInit Request so that the host may specify a separate base address for each Reply Descriptor Post Queue. IOC support for this is indicated using a new IOCCapabilities bit in the IOCFacts Reply. 3) Added Toolbox Console Text Display Tool The host uses the Console Text Display Tool to send a string to IOC's Console using different console types (eg: UART serial terminal or Ethernet terminal). 
4) Firmware images can now be signed using an encrypted hash. 5) Added MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL 6) Added more details about configuration page restrictions when Host Based Discovery is enabled Enabling host based discovery affects the availability of some configuration pages and events. The SAS Expander, SAS Device, and SAS Enclosure configuration pages are not available from the IOC. The IOC returns an error status to any Configuration Request message attempting to access these pages. The IOC does not send the SAS Discovery Event, the SAS Topology Change List Event, or the SAS Enclosure Device Status Change Event when host based discovery is enabled. 7) Bit 13 of the SAS IO Unit Page 1 ControlFlags field is now obsolete. It was used to enable limiting direct attached SATA maximum link rate to 1.5 Gbps. Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 10 ++++-- drivers/scsi/mpt3sas/mpi/mpi2_ioc.h | 62 ++++++++++++++++++++++++++++++++++-- drivers/scsi/mpt3sas/mpi/mpi2_sas.h | 6 ++-- drivers/scsi/mpt3sas/mpi/mpi2_tool.h | 43 ++++++++++++++++++++++++- 4 files changed, 114 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h index 5b0e5c1778fb..becee079d6d0 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h @@ -6,7 +6,7 @@ * Title: MPI Configuration messages and pages * Creation Date: November 10, 2006 * - * mpi2_cnfg.h Version: 02.00.25 + * mpi2_cnfg.h Version: 02.00.26 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -163,6 +163,8 @@ * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK. * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to * match the specification. + * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for + * future use. * -------------------------------------------------------------------------- */ @@ -924,11 +926,15 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { U8 BoardTemperatureUnits; /*0x16 */ U8 Reserved3; /*0x17 */ + U32 Reserved4; /* 0x18 */ + U32 Reserved5; /* 0x1C */ + U32 Reserved6; /* 0x20 */ + U32 Reserved7; /* 0x24 */ } MPI2_CONFIG_PAGE_IO_UNIT_7, *PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t; -#define MPI2_IOUNITPAGE7_PAGEVERSION (0x02) +#define MPI2_IOUNITPAGE7_PAGEVERSION (0x04) /*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */ #define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h index e2bb82143720..15813f3a4dda 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h @@ -6,7 +6,7 @@ * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages * Creation Date: October 11, 2006 * - * mpi2_ioc.h Version: 02.00.22 + * mpi2_ioc.h Version: 02.00.23 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -127,6 +127,11 @@ * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE. * Added ElapsedSeconds field to * MPI2_EVENT_DATA_IR_OPERATION_STATUS. + * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE + * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY. + * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE. 
+ * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY. + * Added Encrypted Hash Extended Image. * -------------------------------------------------------------------------- */ @@ -182,6 +187,10 @@ typedef struct _MPI2_IOC_INIT_REQUEST { #define MPI2_WHOINIT_HOST_DRIVER (0x04) #define MPI2_WHOINIT_MANUFACTURER (0x05) +/* MsgFlags */ +#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01) + + /*MsgVersion */ #define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00) #define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8) @@ -194,9 +203,19 @@ typedef struct _MPI2_IOC_INIT_REQUEST { #define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF) #define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0) -/*minimum depth for the Reply Descriptor Post Queue */ +/*minimum depth for a Reply Descriptor Post Queue */ #define MPI2_RDPQ_DEPTH_MIN (16) +/* Reply Descriptor Post Queue Array Entry */ +typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY { + U64 RDPQBaseAddress; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ +} MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, +*PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, +Mpi2IOCInitRDPQArrayEntry, *pMpi2IOCInitRDPQArrayEntry; + + /*IOCInit Reply message */ typedef struct _MPI2_IOC_INIT_REPLY { U8 WhoInit; /*0x00 */ @@ -306,6 +325,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY { /*ProductID field uses MPI2_FW_HEADER_PID_ */ /*IOCCapabilities */ +#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000) #define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000) #define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000) #define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000) @@ -1140,6 +1160,7 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST { #define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09) #define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A) #define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) +#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C) #define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0) /*MPI v2.0 FWDownload TransactionContext Element */ @@ -1404,6 +1425,7 @@ typedef struct _MPI2_EXT_IMAGE_HEADER { #define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06) #define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07) #define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08) +#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09) #define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80) #define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF) @@ -1560,6 +1582,42 @@ typedef struct _MPI2_INIT_IMAGE_FOOTER { /*defines for the ResetVector field */ #define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14) + +/* Encrypted Hash Extended Image Data */ + +typedef struct _MPI25_ENCRYPTED_HASH_ENTRY { + U8 HashImageType; /* 0x00 */ + U8 HashAlgorithm; /* 0x01 */ + U8 EncryptionAlgorithm; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U32 Reserved2; /* 0x04 */ + U32 EncryptedHash[1]; /* 0x08 */ /* variable length */ +} MPI25_ENCRYPTED_HASH_ENTRY, *PTR_MPI25_ENCRYPTED_HASH_ENTRY, +Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t; + +/* values for HashImageType */ +#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00) +#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01) + +/* values for HashAlgorithm */ +#define MPI25_HASH_ALGORITHM_UNUSED (0x00) +#define MPI25_HASH_ALGORITHM_SHA256 (0x01) + +/* values for EncryptionAlgorithm */ +#define MPI25_ENCRYPTION_ALG_UNUSED (0x00) +#define MPI25_ENCRYPTION_ALG_RSA256 (0x01) + +typedef struct _MPI25_ENCRYPTED_HASH_DATA { + U8 ImageVersion; /* 0x00 */ + U8 NumHash; /* 0x01 */ + U16 Reserved1; /* 0x02 */ + U32 Reserved2; /* 0x04 */ + MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /* 0x08 */ +} MPI25_ENCRYPTED_HASH_DATA, 
*PTR_MPI25_ENCRYPTED_HASH_DATA, +Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t; + + + /**************************************************************************** * PowerManagementControl message ****************************************************************************/ diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h index cba046f6a4b4..361a275b0cce 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h @@ -6,7 +6,7 @@ * Title: MPI Serial Attached SCSI structures and definitions * Creation Date: February 9, 2007 * - * mpi2_sas.h Version: 02.00.07 + * mpi2_sas.h Version: 02.00.08 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -30,6 +30,8 @@ * 11-18-11 02.00.06 Incorporating additions for MPI v2.5. * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA * Passthrough Request message. + * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete + * for anything newer than MPI v2.0. * -------------------------------------------------------------------------- */ @@ -251,7 +253,7 @@ typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST { #define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08) #define MPI2_SAS_OP_SEND_PRIMITIVE (0x0A) #define MPI2_SAS_OP_FORCE_FULL_DISCOVERY (0x0B) -#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) +#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) /* MPI v2.0 only */ #define MPI2_SAS_OP_REMOVE_DEVICE (0x0D) #define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E) #define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h index 34e9a7ba76b0..94e32c2a47fb 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h @@ -6,7 +6,7 @@ * Title: MPI diagnostic tool structures and definitions * Creation Date: March 26, 2007 * - * mpi2_tool.h Version: 02.00.10 + * mpi2_tool.h Version: 02.00.11 * * Version History * --------------- @@ -32,6 +32,7 @@ * message. * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that * it uses MPI Chain SGE as well as MPI Simple SGE. + * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. 
* -------------------------------------------------------------------------- */ @@ -51,6 +52,7 @@ #define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03) #define MPI2_TOOLBOX_BEACON_TOOL (0x05) #define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06) +#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07) /**************************************************************************** * Toolbox reply @@ -331,6 +333,45 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY { Mpi2ToolboxDiagnosticCliReply_t, *pMpi2ToolboxDiagnosticCliReply_t; + +/**************************************************************************** +* Toolbox Console Text Display Tool +****************************************************************************/ + +/* Toolbox Console Text Display Tool request message */ +typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST { + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 Console; /* 0x0C */ + U8 Flags; /* 0x0D */ + U16 Reserved6; /* 0x0E */ + U8 TextToDisplay[4]; /* 0x10 */ +} MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, +*PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, +Mpi2ToolboxTextDisplayRequest_t, +*pMpi2ToolboxTextDisplayRequest_t; + +/* defines for the Console field */ +#define MPI2_TOOLBOX_CONSOLE_TYPE_MASK (0xF0) +#define MPI2_TOOLBOX_CONSOLE_TYPE_DEFAULT (0x00) +#define MPI2_TOOLBOX_CONSOLE_TYPE_UART (0x10) +#define MPI2_TOOLBOX_CONSOLE_TYPE_ETHERNET (0x20) + +#define MPI2_TOOLBOX_CONSOLE_NUMBER_MASK (0x0F) + +/* defines for the Flags field */ +#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01) + + + /***************************************************************************** * * Diagnostic Buffer Messages -- cgit From a4ffce0d63e0683060fee6bc269f7b0b12c9700a Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:29 +0530 Subject: mpt3sas: Copyright in driver sources is updated for year the 2014. Copyright in driver sources is updated for year the 2014. Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. 
Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt3sas/Kconfig | 2 +- drivers/scsi/mpt3sas/mpi/mpi2.h | 2 +- drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 2 +- drivers/scsi/mpt3sas/mpi/mpi2_init.h | 2 +- drivers/scsi/mpt3sas/mpi/mpi2_ioc.h | 2 +- drivers/scsi/mpt3sas/mpi/mpi2_raid.h | 2 +- drivers/scsi/mpt3sas/mpi/mpi2_sas.h | 2 +- drivers/scsi/mpt3sas/mpi/mpi2_tool.h | 2 +- drivers/scsi/mpt3sas/mpi/mpi2_type.h | 2 +- drivers/scsi/mpt3sas/mpt3sas_base.c | 2 +- drivers/scsi/mpt3sas/mpt3sas_base.h | 2 +- drivers/scsi/mpt3sas/mpt3sas_config.c | 2 +- drivers/scsi/mpt3sas/mpt3sas_ctl.c | 2 +- drivers/scsi/mpt3sas/mpt3sas_ctl.h | 2 +- drivers/scsi/mpt3sas/mpt3sas_debug.h | 2 +- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 2 +- drivers/scsi/mpt3sas/mpt3sas_transport.c | 2 +- drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c | 2 +- drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h | 2 +- 19 files changed, 19 insertions(+), 19 deletions(-) diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig index d53e1b02e893..4d235dd741bf 100644 --- a/drivers/scsi/mpt3sas/Kconfig +++ b/drivers/scsi/mpt3sas/Kconfig @@ -2,7 +2,7 @@ # Kernel configuration file for the MPT3SAS # # This code is based on drivers/scsi/mpt3sas/Kconfig -# Copyright (C) 2012-2013 LSI Corporation +# Copyright (C) 2012-2014 LSI Corporation # (mailto:DL-MPTFusionLinux@lsi.com) # This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h index dc143dda422a..c34c1157907b 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2013 LSI Corporation. + * Copyright (c) 2000-2014 LSI Corporation. * * * Name: mpi2.h diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h index becee079d6d0..e261a3153bb3 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2013 LSI Corporation. + * Copyright (c) 2000-2014 LSI Corporation. * * * Name: mpi2_cnfg.h diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h index b3e33313d7a3..068c98efd742 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2013 LSI Corporation. + * Copyright (c) 2000-2014 LSI Corporation. * * * Name: mpi2_init.h diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h index 15813f3a4dda..490830957806 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2013 LSI Corporation. + * Copyright (c) 2000-2014 LSI Corporation. * * * Name: mpi2_ioc.h diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h index fbe3aae102b0..13d93ca029d5 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2013 LSI Corporation. + * Copyright (c) 2000-2014 LSI Corporation. * * * Name: mpi2_raid.h diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h index 361a275b0cce..156e30543a2f 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2013 LSI Corporation. + * Copyright (c) 2000-2014 LSI Corporation. 
* * * Name: mpi2_sas.h diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h index 94e32c2a47fb..904910d8a737 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2013 LSI Corporation. + * Copyright (c) 2000-2014 LSI Corporation. * * * Name: mpi2_tool.h diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h index ba1fed50966e..99ab093602e8 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_type.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2013 LSI Corporation. + * Copyright (c) 2000-2014 LSI Corporation. * * * Name: mpi2_type.h diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 09e6b21ff4ef..b8f22a68fcd0 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -3,7 +3,7 @@ * for access to MPT (Message Passing Technology) firmware. * * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c - * Copyright (C) 2012-2013 LSI Corporation + * Copyright (C) 2012-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 575f1bcdd51c..86b931e6c6af 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -3,7 +3,7 @@ * for access to MPT (Message Passing Technology) firmware. * * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h - * Copyright (C) 2012-2013 LSI Corporation + * Copyright (C) 2012-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index 936ec0391990..4472c2af9255 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c @@ -2,7 +2,7 @@ * This module provides common API for accessing firmware configuration pages * * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c - * Copyright (C) 2012-2013 LSI Corporation + * Copyright (C) 2012-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index ba9cbe598a91..dca14877d5ab 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -3,7 +3,7 @@ * controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c - * Copyright (C) 2012-2013 LSI Corporation + * Copyright (C) 2012-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h index 53b0c480d98f..5f3d7fd7c2f8 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h @@ -3,7 +3,7 @@ * controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h - * Copyright (C) 2012-2013 LSI Corporation + * Copyright (C) 2012-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt3sas/mpt3sas_debug.h b/drivers/scsi/mpt3sas/mpt3sas_debug.h index 545b22d2cbdf..4778e7dd98bd 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_debug.h +++ b/drivers/scsi/mpt3sas/mpt3sas_debug.h 
@@ -2,7 +2,7 @@ * Logging Support for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c - * Copyright (C) 2012-2013 LSI Corporation + * Copyright (C) 2012-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 77a5bfa100f8..95a1049472f0 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -2,7 +2,7 @@ * Scsi Host Layer for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c - * Copyright (C) 2012-2013 LSI Corporation + * Copyright (C) 2012-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index 65170cb1a00f..d4bafaaebea9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -2,7 +2,7 @@ * SAS Transport Layer for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c - * Copyright (C) 2012-2013 LSI Corporation + * Copyright (C) 2012-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c index f6533ab20364..8a2dd113f401 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c @@ -3,7 +3,7 @@ * (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c - * Copyright (C) 2012-2013 LSI Corporation + * Copyright (C) 2012-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h index bb693923bef1..f681db56c53b 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h @@ -4,7 +4,7 @@ * controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h - * Copyright (C) 2012-2013 LSI Corporation + * Copyright (C) 2012-2014 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or -- cgit From 1117b31a6fa411f9e367bfff1721e8f33f5b2d66 Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:30 +0530 Subject: mpt3sas: Added OEM branding Strings Added following branding Strings for Intel custom HBAs support. Driver String: Vendor ID Device ID SubSystemVendor ID SubSystemDevice ID Intel(R) Integrated RAID Module RMS3JC080 0x1000 0x0097 0x8086 0x3521 Intel(R) RAID Controller RS3GC008 0x1000 0x0097 0x8086 0x3522 Intel(R) RAID Controller RS3FC044 0x1000 0x0097 0x8086 0x3523 Intel(R) RAID Controller RS3UC080 0x1000 0x0097 0x8086 0x3524 Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. 
Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt3sas/mpt3sas_base.c | 49 +++++++++++++++++++++++++++++++++++++ drivers/scsi/mpt3sas/mpt3sas_base.h | 18 ++++++++++++++ 2 files changed, 67 insertions(+) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index b8f22a68fcd0..e94993e2eb46 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2185,6 +2185,53 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) &ioc->scsi_lookup_lock); } +/** + * _base_display_intel_branding - Display branding string + * @ioc: per adapter object + * + * Return nothing. + */ +static void +_base_display_intel_branding(struct MPT3SAS_ADAPTER *ioc) +{ + if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL) + return; + + switch (ioc->pdev->device) { + case MPI25_MFGPAGE_DEVID_SAS3008: + switch (ioc->pdev->subsystem_device) { + case MPT3SAS_INTEL_RMS3JC080_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT3SAS_INTEL_RMS3JC080_BRANDING); + break; + + case MPT3SAS_INTEL_RS3GC008_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT3SAS_INTEL_RS3GC008_BRANDING); + break; + case MPT3SAS_INTEL_RS3FC044_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT3SAS_INTEL_RS3FC044_BRANDING); + break; + case MPT3SAS_INTEL_RS3UC080_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT3SAS_INTEL_RS3UC080_BRANDING); + break; + default: + pr_info(MPT3SAS_FMT + "Intel(R) Controller: Subsystem ID: 0x%X\n", + ioc->name, ioc->pdev->subsystem_device); + break; + } + break; + default: + pr_info(MPT3SAS_FMT + "Intel(R) Controller: Subsystem ID: 0x%X\n", + ioc->name, ioc->pdev->subsystem_device); + break; + } +} + /** @@ -2216,6 +2263,8 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc) (bios_version & 0x0000FF00) >> 8, bios_version & 0x000000FF); + _base_display_intel_branding(ioc); + pr_info(MPT3SAS_FMT "Protocol=(", ioc->name); if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 86b931e6c6af..e920728022d3 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -130,7 +130,25 @@ #define MPT_TARGET_FLAGS_DELETED 0x04 #define MPT_TARGET_FASTPATH_IO 0x08 +/* + * Intel HBA branding + */ +#define MPT3SAS_INTEL_RMS3JC080_BRANDING \ + "Intel(R) Integrated RAID Module RMS3JC080" +#define MPT3SAS_INTEL_RS3GC008_BRANDING \ + "Intel(R) RAID Controller RS3GC008" +#define MPT3SAS_INTEL_RS3FC044_BRANDING \ + "Intel(R) RAID Controller RS3FC044" +#define MPT3SAS_INTEL_RS3UC080_BRANDING \ + "Intel(R) RAID Controller RS3UC080" +/* + * Intel HBA SSDIDs + */ +#define MPT3SAS_INTEL_RMS3JC080_SSDID 0x3521 +#define MPT3SAS_INTEL_RS3GC008_SSDID 0x3522 +#define MPT3SAS_INTEL_RS3FC044_SSDID 0x3523 +#define MPT3SAS_INTEL_RS3UC080_SSDID 0x3524 /* * status bits for ioc->diag_buffer_status -- cgit From 9b05c91ae7fbff96864ca7656d81d5980996e8f4 Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:31 +0530 Subject: mpt3sas: Added Reply Descriptor Post Queue (RDPQ) Array support Up to now, Driver allocates a single contiguous block of memory pool for all reply queues and passes down a single address in the ReplyDescriptorPostQueueAddress field of the IOC Init Request Message to the firmware. When firmware receives this address, it will program each of the Reply Descriptor Post Queue registers, as each reply queue has its own register. 
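To make the arithmetic described in the preceding paragraph concrete, here is a minimal sketch (not firmware or driver source; the helper name is invented, and the 8-byte descriptor size simply matches the element_size(8) value the driver's debug print reports later in this patch) of how every queue's base falls out of the single address passed at IOC init:

static u64 legacy_reply_post_base(u64 pool_base_dma,
				  u16 reply_post_queue_depth,
				  u8 queue_index)
{
	/* one contiguous pool: queue i starts right after queue i-1 */
	u32 queue_sz = reply_post_queue_depth *
		       8 /* sizeof(Mpi2DefaultReplyDescriptor_t) */;

	return pool_base_dma + (u64)queue_index * queue_sz;
}

With the RDPQ array scheme introduced below, this derivation goes away: the driver hands the firmware one base address per reply queue instead.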
Thus the firmware, starting from that base address, determines the starting address of each subsequent reply queue through simple arithmetic. The size of this contiguous block of memory pool is directly proportional to the number of MSI-X vectors and the HBA queue depth. For example, more MSI-X vectors require a larger contiguous block of memory pool. But some OS kernels are unable to allocate such a large contiguous block of memory pool. So, the proposal is to allocate memory independently for each Reply Queue and pass down all of the addresses to the firmware. Then the firmware will just take each address and program the value into the correct register. When HBAs with older firmware (i.e. without RDPQ capability) are used with this new driver, the max_msix_vectors value is set to 8 by default. Change set in v1: 1. Declared the _base_get_ioc_facts() function at the beginning of the mpt3sas_base.c file instead of moving all of these functions before mpt3sas_base_map_resources(): a. _base_wait_for_doorbell_int() b. _base_wait_for_doorbell_ack() c. _base_wait_for_doorbell_not_used() d. _base_handshake_req_reply_wait() e. _base_get_ioc_facts() 2. Initially set the consistent DMA mask to 32 bit and then change it to a 64 bit mask after allocating the RDPQ pools by calling the function _base_change_consistent_dma_mask. This ensures that the upper 32 bits of all RDPQ entries' base addresses are the same. 3. Reduced the redundancy between the RDPQ and non-RDPQ support in the following functions: a. _base_release_memory_pools() b. _base_allocate_memory_pools() c. _base_send_ioc_init() d. _base_make_ioc_operational() Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt3sas/mpt3sas_base.c | 232 +++++++++++++++++++++++++++--------- drivers/scsi/mpt3sas/mpt3sas_base.h | 20 +++- 2 files changed, 191 insertions(+), 61 deletions(-) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index e94993e2eb46..1560115079c7 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -91,6 +91,8 @@ static int mpt3sas_fwfault_debug; MODULE_PARM_DESC(mpt3sas_fwfault_debug, " enable detection of firmware fault and halt firmware - (default=0)"); +static int +_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag); /** * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
@@ -1482,17 +1484,22 @@ static int _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) { struct sysinfo s; - char *desc = NULL; + u64 consistent_dma_mask; + + if (ioc->dma_mask) + consistent_dma_mask = DMA_BIT_MASK(64); + else + consistent_dma_mask = DMA_BIT_MASK(32); if (sizeof(dma_addr_t) > 4) { const uint64_t required_mask = dma_get_required_mask(&pdev->dev); if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) { ioc->base_add_sg_single = &_base_add_sg_single_64; ioc->sge_size = sizeof(Mpi2SGESimple64_t); - desc = "64"; + ioc->dma_mask = 64; goto out; } } @@ -1501,16 +1508,27 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { ioc->base_add_sg_single = &_base_add_sg_single_32; ioc->sge_size = sizeof(Mpi2SGESimple32_t); - desc = "32"; + ioc->dma_mask = 32; } else return -ENODEV; out: si_meminfo(&s); pr_info(MPT3SAS_FMT - "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", - ioc->name, desc, convert_to_kb(s.totalram)); + "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", + ioc->name, ioc->dma_mask, convert_to_kb(s.totalram)); + + return 0; +} +static int +_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc, + struct pci_dev *pdev) +{ + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + return -ENODEV; + } return 0; } @@ -1698,11 +1716,15 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count, ioc->cpu_count, max_msix_vectors); + if (!ioc->rdpq_array_enable && max_msix_vectors == -1) + max_msix_vectors = 8; + if (max_msix_vectors > 0) { ioc->reply_queue_count = min_t(int, max_msix_vectors, ioc->reply_queue_count); ioc->msix_vector_count = ioc->reply_queue_count; - } + } else if (max_msix_vectors == 0) + goto try_ioapic; entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), GFP_KERNEL); @@ -1742,6 +1764,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) /* failback to io_apic interrupt routing */ try_ioapic: + ioc->reply_queue_count = 1; r = _base_request_irq(ioc, 0, ioc->pdev->irq); return r; @@ -1821,6 +1844,16 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) } _base_mask_interrupts(ioc); + + r = _base_get_ioc_facts(ioc, CAN_SLEEP); + if (r) + goto out_fail; + + if (!ioc->rdpq_array_enable_assigned) { + ioc->rdpq_array_enable = ioc->rdpq_array_capable; + ioc->rdpq_array_enable_assigned = 1; + } + r = _base_enable_msix(ioc); if (r) goto out_fail; @@ -2496,7 +2529,8 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) static void _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) { - int i; + int i = 0; + struct reply_post_struct *rps; dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, __func__)); @@ -2541,15 +2575,25 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) ioc->reply_free = NULL; } - if (ioc->reply_post_free) { - pci_pool_free(ioc->reply_post_free_dma_pool, - ioc->reply_post_free, ioc->reply_post_free_dma); + if (ioc->reply_post) { + do { + rps = &ioc->reply_post[i]; + if (rps->reply_post_free) { + pci_pool_free( + ioc->reply_post_free_dma_pool, + rps->reply_post_free, + rps->reply_post_free_dma); + dexitprintk(ioc, pr_info(MPT3SAS_FMT + "reply_post_free_pool(0x%p): free\n", + ioc->name, 
rps->reply_post_free)); + rps->reply_post_free = NULL; + } + } while (ioc->rdpq_array_enable && + (++i < ioc->reply_queue_count)); + if (ioc->reply_post_free_dma_pool) pci_pool_destroy(ioc->reply_post_free_dma_pool); - dexitprintk(ioc, pr_info(MPT3SAS_FMT - "reply_post_free_pool(0x%p): free\n", ioc->name, - ioc->reply_post_free)); - ioc->reply_post_free = NULL; + kfree(ioc->reply_post); } if (ioc->config_page) { @@ -2696,6 +2740,65 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize, ioc->chains_needed_per_io)); + /* reply post queue, 16 byte align */ + reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(Mpi2DefaultReplyDescriptor_t); + + sz = reply_post_free_sz; + if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable) + sz *= ioc->reply_queue_count; + + ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ? + (ioc->reply_queue_count):1, + sizeof(struct reply_post_struct), GFP_KERNEL); + + if (!ioc->reply_post) { + pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n", + ioc->name); + goto out; + } + ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool", + ioc->pdev, sz, 16, 0); + if (!ioc->reply_post_free_dma_pool) { + pr_err(MPT3SAS_FMT + "reply_post_free pool: pci_pool_create failed\n", + ioc->name); + goto out; + } + i = 0; + do { + ioc->reply_post[i].reply_post_free = + pci_pool_alloc(ioc->reply_post_free_dma_pool, + GFP_KERNEL, + &ioc->reply_post[i].reply_post_free_dma); + if (!ioc->reply_post[i].reply_post_free) { + pr_err(MPT3SAS_FMT + "reply_post_free pool: pci_pool_alloc failed\n", + ioc->name); + goto out; + } + memset(ioc->reply_post[i].reply_post_free, 0, sz); + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "reply post free pool (0x%p): depth(%d)," + "element_size(%d), pool_size(%d kB)\n", ioc->name, + ioc->reply_post[i].reply_post_free, + ioc->reply_post_queue_depth, 8, sz/1024)); + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "reply_post_free_dma = (0x%llx)\n", ioc->name, + (unsigned long long) + ioc->reply_post[i].reply_post_free_dma)); + total_sz += sz; + } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count)); + + if (ioc->dma_mask == 64) { + if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) { + pr_warn(MPT3SAS_FMT + "no suitable consistent DMA mask for %s\n", + ioc->name, pci_name(ioc->pdev)); + goto out; + } + } + ioc->scsiio_depth = ioc->hba_queue_depth - ioc->hi_priority_depth - ioc->internal_depth; @@ -2910,40 +3013,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) ioc->name, (unsigned long long)ioc->reply_free_dma)); total_sz += sz; - /* reply post queue, 16 byte align */ - reply_post_free_sz = ioc->reply_post_queue_depth * - sizeof(Mpi2DefaultReplyDescriptor_t); - if (_base_is_controller_msix_enabled(ioc)) - sz = reply_post_free_sz * ioc->reply_queue_count; - else - sz = reply_post_free_sz; - ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool", - ioc->pdev, sz, 16, 0); - if (!ioc->reply_post_free_dma_pool) { - pr_err(MPT3SAS_FMT - "reply_post_free pool: pci_pool_create failed\n", - ioc->name); - goto out; - } - ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool , - GFP_KERNEL, &ioc->reply_post_free_dma); - if (!ioc->reply_post_free) { - pr_err(MPT3SAS_FMT - "reply_post_free pool: pci_pool_alloc failed\n", - ioc->name); - goto out; - } - memset(ioc->reply_post_free, 0, sz); - dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply post free pool" \ - "(0x%p): depth(%d), 
element_size(%d), pool_size(%d kB)\n", - ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8, - sz/1024)); - dinitprintk(ioc, pr_info(MPT3SAS_FMT - "reply_post_free_dma = (0x%llx)\n", - ioc->name, (unsigned long long) - ioc->reply_post_free_dma)); - total_sz += sz; - ioc->config_page_sz = 512; ioc->config_page = pci_alloc_consistent(ioc->pdev, ioc->config_page_sz, &ioc->config_page_dma); @@ -3626,6 +3695,9 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) ioc->ir_firmware = 1; + if ((facts->IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE)) + ioc->rdpq_array_capable = 1; facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); facts->IOCRequestFrameSize = le16_to_cpu(mpi_reply.IOCRequestFrameSize); @@ -3662,9 +3734,12 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) { Mpi2IOCInitRequest_t mpi_request; Mpi2IOCInitReply_t mpi_reply; - int r; + int i, r = 0; struct timeval current_time; u16 ioc_status; + u32 reply_post_free_array_sz = 0; + Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL; + dma_addr_t reply_post_free_array_dma; dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, __func__)); @@ -3693,9 +3768,31 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) cpu_to_le64((u64)ioc->request_dma); mpi_request.ReplyFreeQueueAddress = cpu_to_le64((u64)ioc->reply_free_dma); - mpi_request.ReplyDescriptorPostQueueAddress = - cpu_to_le64((u64)ioc->reply_post_free_dma); + if (ioc->rdpq_array_enable) { + reply_post_free_array_sz = ioc->reply_queue_count * + sizeof(Mpi2IOCInitRDPQArrayEntry); + reply_post_free_array = pci_alloc_consistent(ioc->pdev, + reply_post_free_array_sz, &reply_post_free_array_dma); + if (!reply_post_free_array) { + pr_err(MPT3SAS_FMT + "reply_post_free_array: pci_alloc_consistent failed\n", + ioc->name); + r = -ENOMEM; + goto out; + } + memset(reply_post_free_array, 0, reply_post_free_array_sz); + for (i = 0; i < ioc->reply_queue_count; i++) + reply_post_free_array[i].RDPQBaseAddress = + cpu_to_le64( + (u64)ioc->reply_post[i].reply_post_free_dma); + mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE; + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64)reply_post_free_array_dma); + } else { + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma); + } /* This time stamp specifies number of milliseconds * since epoch ~ midnight January 1, 1970. 
@@ -3723,7 +3820,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) if (r != 0) { pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", ioc->name, __func__, r); - return r; + goto out; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; @@ -3733,7 +3830,12 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) r = -EIO; } - return 0; +out: + if (reply_post_free_array) + pci_free_consistent(ioc->pdev, reply_post_free_array_sz, + reply_post_free_array, + reply_post_free_array_dma); + return r; } /** @@ -4283,7 +4385,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) struct _tr_list *delayed_tr, *delayed_tr_next; struct adapter_reply_queue *reply_q; long reply_post_free; - u32 reply_post_free_sz; + u32 reply_post_free_sz, index = 0; dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, __func__)); @@ -4354,9 +4456,9 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) _base_assign_reply_queues(ioc); /* initialize Reply Post Free Queue */ - reply_post_free = (long)ioc->reply_post_free; reply_post_free_sz = ioc->reply_post_queue_depth * sizeof(Mpi2DefaultReplyDescriptor_t); + reply_post_free = (long)ioc->reply_post[index].reply_post_free; list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { reply_q->reply_post_host_index = 0; reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *) @@ -4366,7 +4468,15 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) cpu_to_le64(ULLONG_MAX); if (!_base_is_controller_msix_enabled(ioc)) goto skip_init_reply_post_free_queue; - reply_post_free += reply_post_free_sz; + /* + * If RDPQ is enabled, switch to the next allocation. + * Otherwise advance within the contiguous region. + */ + if (ioc->rdpq_array_enable) + reply_post_free = (long) + ioc->reply_post[++index].reply_post_free; + else + reply_post_free += reply_post_free_sz; } skip_init_reply_post_free_queue: @@ -4477,6 +4587,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) goto out_free_resources; } + ioc->rdpq_array_enable_assigned = 0; + ioc->dma_mask = 0; r = mpt3sas_base_map_resources(ioc); if (r) goto out_free_resources; @@ -4853,6 +4965,12 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag, r = _base_get_ioc_facts(ioc, CAN_SLEEP); if (r) goto out; + + if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable) + panic("%s: Issue occurred with flashing controller firmware." 
+ "Please reboot the system and ensure that the correct" + " firmware version is running\n", ioc->name); + r = _base_make_ioc_operational(ioc, sleep_flag); if (!r) _base_reset_handler(ioc, MPT3_IOC_DONE_RESET); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index e920728022d3..ca4a7ccbb66e 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -569,6 +569,11 @@ struct mpt3sas_port_facts { u16 MaxPostedCmdBuffers; }; +struct reply_post_struct { + Mpi2ReplyDescriptorsUnion_t *reply_post_free; + dma_addr_t reply_post_free_dma; +}; + /** * enum mutex_type - task management mutex type * @TM_MUTEX_OFF: mutex is not required becuase calling function is acquiring it @@ -597,6 +602,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @ir_firmware: IR firmware present * @bars: bitmask of BAR's that must be configured * @mask_interrupts: ignore interrupt + * @dma_mask: used to set the consistent dma mask * @fault_reset_work_q_name: fw fault work queue * @fault_reset_work_q: "" * @fault_reset_work: "" @@ -712,8 +718,11 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @reply_free_dma_pool: * @reply_free_host_index: tail index in pool to insert free replys * @reply_post_queue_depth: reply post queue depth - * @reply_post_free: pool for reply post (64bit descriptor) - * @reply_post_free_dma: + * @reply_post_struct: struct for reply_post_free physical & virt address + * @rdpq_array_capable: FW supports multiple reply queue addresses in ioc_init + * @rdpq_array_enable: rdpq_array support is enabled in the driver + * @rdpq_array_enable_assigned: this ensures that rdpq_array_enable flag + * is assigned only ones * @reply_queue_count: number of reply queue's * @reply_queue_list: link list contaning the reply queue info * @reply_post_host_index: head index in the pool where FW completes IO @@ -735,6 +744,7 @@ struct MPT3SAS_ADAPTER { u8 ir_firmware; int bars; u8 mask_interrupts; + int dma_mask; /* fw fault handler */ char fault_reset_work_q_name[20]; @@ -914,8 +924,10 @@ struct MPT3SAS_ADAPTER { /* reply post queue */ u16 reply_post_queue_depth; - Mpi2ReplyDescriptorsUnion_t *reply_post_free; - dma_addr_t reply_post_free_dma; + struct reply_post_struct *reply_post; + u8 rdpq_array_capable; + u8 rdpq_array_enable; + u8 rdpq_array_enable_assigned; struct dma_pool *reply_post_free_dma_pool; u8 reply_queue_count; struct list_head reply_queue_list; -- cgit From 70d8c86ebeaca552801500d20d5f2c6cc235cabd Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:32 +0530 Subject: mpt3sas: Bump mpt3sas driver version to 04.100.00.00 Bump mpt3sas driver version to 04.100.00.00. Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. 
Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt3sas/mpt3sas_base.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index ca4a7ccbb66e..40926aa9b24d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -70,8 +70,8 @@ #define MPT3SAS_DRIVER_NAME "mpt3sas" #define MPT3SAS_AUTHOR "LSI Corporation " #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" -#define MPT3SAS_DRIVER_VERSION "03.100.00.00" -#define MPT3SAS_MAJOR_VERSION 3 +#define MPT3SAS_DRIVER_VERSION "04.100.00.00" +#define MPT3SAS_MAJOR_VERSION 4 #define MPT3SAS_MINOR_VERSION 100 #define MPT3SAS_BUILD_VERSION 0 #define MPT3SAS_RELEASE_VERSION 00 -- cgit From b65f1d4da7df44835bd0a2452332e253a5c66d9b Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 12 Sep 2014 15:35:33 +0530 Subject: mpt3sas, mpt2sas: fix scsi_add_host error handling problems in _scsih_probe In _scsih_probe, propagate the return value from scsi_add_host. In mpt3sas, avoid calling list_del twice if that returns an error, which causes list_del corruption warnings if an error is returned. Tested with blk-mq and scsi-mq patches to properly cleanup from and propagate blk_mq_init_rq_map errors. Signed-off-by: Robert Elliott Acked-by: Sreekanth Reddy Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 8 ++++++-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 9 ++++++--- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 231ec34d8ea9..992a224e51ee 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -8147,6 +8147,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct MPT2SAS_ADAPTER *ioc; struct Scsi_Host *shost; + int rv; shost = scsi_host_alloc(&scsih_driver_template, sizeof(struct MPT2SAS_ADAPTER)); @@ -8242,6 +8243,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!ioc->firmware_event_thread) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); + rv = -ENODEV; goto out_thread_fail; } @@ -8249,6 +8251,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) if ((mpt2sas_base_attach(ioc))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); + rv = -ENODEV; goto out_attach_fail; } @@ -8266,7 +8269,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) } else ioc->hide_drives = 0; - if ((scsi_add_host(shost, &pdev->dev))) { + rv = scsi_add_host(shost, &pdev->dev); + if (rv) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out_add_shost_fail; @@ -8283,7 +8287,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) out_thread_fail: list_del(&ioc->list); scsi_host_put(shost); - return -ENODEV; + return rv; } #ifdef CONFIG_PM diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 95a1049472f0..857276b8880f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -7781,6 +7781,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct MPT3SAS_ADAPTER *ioc; struct Scsi_Host *shost; + int rv; shost = scsi_host_alloc(&scsih_driver_template, sizeof(struct MPT3SAS_ADAPTER)); @@ -7873,6 +7874,7 @@ _scsih_probe(struct pci_dev *pdev, 
const struct pci_device_id *id) if (!ioc->firmware_event_thread) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); + rv = -ENODEV; goto out_thread_fail; } @@ -7880,12 +7882,13 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) if ((mpt3sas_base_attach(ioc))) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); + rv = -ENODEV; goto out_attach_fail; } - if ((scsi_add_host(shost, &pdev->dev))) { + rv = scsi_add_host(shost, &pdev->dev); + if (rv) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); - list_del(&ioc->list); goto out_add_shost_fail; } @@ -7898,7 +7901,7 @@ out_add_shost_fail: out_thread_fail: list_del(&ioc->list); scsi_host_put(shost); - return -ENODEV; + return rv; } #ifdef CONFIG_PM -- cgit From 07e38d94ef3646ccee4f222ae1f3033bb37f7fa0 Mon Sep 17 00:00:00 2001 From: "Sumit.Saxena@avagotech.com" Date: Fri, 12 Sep 2014 18:57:13 +0530 Subject: megaraid_sas : Do not scan non syspd drives Resending the patch. Addressed the review comments from Tomas Henzl. Current driver allow device scan for all the devices on channel 0 and 1. E.a If we have two single drive raid volumes, we may see prints like below. First two prints are for physical device which are used to form VD. Prints like this creates confusion as it is really not required to scan any hidden physical devices. scsi1 : LSI SAS based MegaRAID driver scsi 1:0:0:0: Direct-Access LSI MR9361-8i 4.21 PQ: 0 ANSI: 5 scsi 1:0:1:0: Direct-Access LSI MR9361-8i 4.21 PQ: 0 ANSI: 5 scsi 1:2:0:0: Direct-Access LSI MR9361-8i 4.21 PQ: 0 ANSI: 5 scsi 1:2:1:0: Direct-Access LSI MR9361-8i 4.21 PQ: 0 ANSI: 5 When slave_alloc called, sdev-type will not be set, so current code will always return "0" in slave_alloc callback. This patch make sure that driver return "-ENXIO" for non-syspd devices. After this patch, we will see prints in syslog only for devices which are exposed. For current example, below print will be available in syslog. scsi1 : LSI SAS based MegaRAID driver scsi 1:2:0:0: Direct-Access LSI MR9361-8i 4.21 PQ: 0 ANSI: 5 scsi 1:2:1:0: Direct-Access LSI MR9361-8i 4.21 PQ: 0 ANSI: 5 Signed-off-by: Sumit Saxena Signed-off-by: Kashyap Desai Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas_base.c | 35 ++++--------------------------- 1 file changed, 4 insertions(+), 31 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index ff44f2ce4153..a894f13c246f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1628,36 +1628,12 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no) static int megasas_slave_configure(struct scsi_device *sdev) { - u16 pd_index = 0; - struct megasas_instance *instance ; - - instance = megasas_lookup_instance(sdev->host->host_no); - - /* - * Don't export physical disk devices to the disk driver. - * - * FIXME: Currently we don't export them to the midlayer at all. - * That will be fixed once LSI engineers have audited the - * firmware for possible issues. 
- */ - if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && - sdev->type == TYPE_DISK) { - pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + - sdev->id; - if (instance->pd_list[pd_index].driveState == - MR_PD_STATE_SYSTEM) { - blk_queue_rq_timeout(sdev->request_queue, - MEGASAS_DEFAULT_CMD_TIMEOUT * HZ); - return 0; - } - return -ENXIO; - } - /* * The RAID firmware may require extended timeouts. */ blk_queue_rq_timeout(sdev->request_queue, MEGASAS_DEFAULT_CMD_TIMEOUT * HZ); + return 0; } @@ -1666,18 +1642,15 @@ static int megasas_slave_alloc(struct scsi_device *sdev) u16 pd_index = 0; struct megasas_instance *instance ; instance = megasas_lookup_instance(sdev->host->host_no); - if ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) && - (sdev->type == TYPE_DISK)) { + if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) { /* * Open the OS scan to the SYSTEM PD */ pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; - if ((instance->pd_list[pd_index].driveState == - MR_PD_STATE_SYSTEM) && - (instance->pd_list[pd_index].driveType == - TYPE_DISK)) { + if (instance->pd_list[pd_index].driveState == + MR_PD_STATE_SYSTEM) { return 0; } return -ENXIO; -- cgit From 0756040952582c4e7c2f23ff2af882c0f0c7a516 Mon Sep 17 00:00:00 2001 From: "Sumit.Saxena@avagotech.com" Date: Fri, 12 Sep 2014 18:57:18 +0530 Subject: megaraid_sas : Use writeq for 64bit pci write to avoid spinlock overhead Resending the patch. Addressed the review comments from Tomas Henzl. Reduce the assingment for u64 req_data variable. Use writeq() for 64bit PCI write instead of writel() to avoid additional lock overhead. Signed-off-by: Sumit Saxena Signed-off-by: Kashyap Desai Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 57b47fe69072..8898817eaf8b 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1065,6 +1065,11 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance, u32 req_desc_hi, struct megasas_register_set __iomem *regs) { +#if defined(writeq) && defined(CONFIG_64BIT) + u64 req_data = (((u64)req_desc_hi << 32) | (u32)req_desc_lo); + + writeq(le64_to_cpu(req_data), &(regs)->inbound_low_queue_port); +#else unsigned long flags; spin_lock_irqsave(&instance->hba_lock, flags); @@ -1072,6 +1077,7 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance, writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port); writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port); spin_unlock_irqrestore(&instance->hba_lock, flags); +#endif } /** -- cgit From db4fc864ae2a27153f7f0c2af169ad4447cb82bc Mon Sep 17 00:00:00 2001 From: "Sumit.Saxena@avagotech.com" Date: Fri, 12 Sep 2014 18:57:23 +0530 Subject: megaraid_sas : Update threshold based reply post host index register Resending the patch. Addressed the review comments from Tomas Henzl. Current driver updates reply post host index to let firmware know that replies are processed, while returning from ISR function, only if there is no oustanding replies in reply queue. Driver will free the request frame immediately from ISR but reply post host index is not yet updated. It means freed request can be used by submission path and there may be a tight loop in request/reply path. In such condition, firmware may crash when it tries to post reply and there is no free reply post descriptor. 
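The change that the rest of this message describes and the diff below implements can be sketched as follows. This is a simplified illustration, not the driver's code: reply_pending() and complete_one_reply() are hypothetical stand-ins for the real descriptor walk, and the host-index value is reduced to a running counter, whereas the actual patch writes fusion->last_reply_idx[MSIxIndex] and uses a per-MSI-x register layout for Invader/Fury controllers:

#define THRESHOLD_REPLY_COUNT 50

static void drain_reply_queue_sketch(struct megasas_instance *instance,
				     u32 msix_index)
{
	int threshold_reply_count = 0;
	u32 last_idx = 0;

	while (reply_pending(instance, msix_index)) {	  /* hypothetical */
		complete_one_reply(instance, msix_index); /* hypothetical */
		last_idx++;

		/*
		 * Ring the reply post host index register every
		 * THRESHOLD_REPLY_COUNT completions instead of only once
		 * after the queue is drained, so firmware always has free
		 * reply descriptors even in a request/reply tight loop.
		 */
		if (++threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			writel((msix_index << 24) | last_idx,
			       instance->reply_post_host_index_addr[0]);
			threshold_reply_count = 0;
		}
	}
}

Doubling the reply queue depth relative to the request queue, the second half of the change, gives firmware the same guarantee in the worst case where the driver lags behind.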
Eventually two things needs to be change to avoid this issue. Increase reply queue depth (double than request queue) to accommodate worst case scenario. Update reply post host index to firmware once it reach to some pre-defined threshold value. This change will make sure that firmware will always have some buffer of reply descriptor and will never find empty reply descriptor in completion path. Signed-off-by: Sumit Saxena Signed-off-by: Kashyap Desai Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 23 ++++++++++++++++++++++- drivers/scsi/megaraid/megaraid_sas_fusion.h | 1 + 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 8898817eaf8b..155b8b1a8f4b 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -971,7 +971,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) max_cmd = instance->max_fw_cmds; - fusion->reply_q_depth = ((max_cmd + 1 + 15)/16)*16; + fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16); fusion->request_alloc_sz = sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd; @@ -1874,6 +1874,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) u32 status, extStatus, device_id; union desc_value d_val; struct LD_LOAD_BALANCE_INFO *lbinfo; + int threshold_reply_count = 0; fusion = instance->ctrl_context; @@ -1961,6 +1962,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) desc->Words = ULLONG_MAX; num_completed++; + threshold_reply_count++; /* Get the next reply descriptor */ if (!fusion->last_reply_idx[MSIxIndex]) @@ -1980,6 +1982,25 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) break; + /* + * Write to reply post host index register after completing threshold + * number of reply counts and still there are more replies in reply queue + * pending to be completed + */ + if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) { + if ((instance->pdev->device == + PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_FURY)) + writel(((MSIxIndex & 0x7) << 24) | + fusion->last_reply_idx[MSIxIndex], + instance->reply_post_host_index_addr[MSIxIndex/8]); + else + writel((MSIxIndex << 24) | + fusion->last_reply_idx[MSIxIndex], + instance->reply_post_host_index_addr[0]); + threshold_reply_count = 0; + } } if (!num_completed) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index e76af5459a09..d660c4df4153 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -86,6 +86,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE { #define MEGASAS_FP_CMD_LEN 16 #define MEGASAS_FUSION_IN_RESET 0 +#define THRESHOLD_REPLY_COUNT 50 /* * Raid Context structure which describes MegaRAID specific IO Parameters -- cgit From fc62b3fc9021526d096d940ec62e74af72eb1e10 Mon Sep 17 00:00:00 2001 From: "Sumit.Saxena@avagotech.com" Date: Fri, 12 Sep 2014 18:57:28 +0530 Subject: megaraid_sas : Firmware crash dump feature support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Resending the patch. Addressed the review comments from Tomas Henzl. Move buff_offset inside spinlock, corrected loop at crash dump buffer free, reset_devices check is added to disable fw crash dump feature in kdump kernel. 
This feature provides an interface similar to the kernel crash dump feature. When the megaraid firmware encounters a crash, the driver will collect the raw firmware image and dump it into a pre-configured location. The driver will allocate two different segments of memory. #1 A large non-DMA-able buffer (allocated on demand) to capture the actual FW crash dump. #2 A DMA buffer (persistent allocation) that just acts as an arbitrator. Firmware will keep writing crash dump data in chunks of the DMA buffer size into #2, which the driver copies back to the host memory described in #1. Driver-Firmware interface: ================== A.) The host driver can allocate a maximum of 512MB of host memory to store crash dump data. This memory will be internal to the host and will not be exposed to the firmware. The driver may not be able to allocate 512 MB; in that case, it will allocate whatever memory is available at run time to store crash dump data. Let’s call this buffer the Host Crash Buffer. The Host Crash Buffer will not be contiguous as a whole, but it will consist of multiple chunks of contiguous memory. This is internal to the driver; the firmware and application are unaware of it. A partial allocation of the Host Crash Buffer may still hold valid debug information, depending on what was collected in that buffer and on the nature of the failure. A complete crash dump is the best case, but we do want to capture a partial buffer just to grab something rather than nothing. The Host Crash Buffer will be allocated only when FW crash dump data is available, and will be deallocated once the application copies the Host Crash Buffer to a file. The Host Crash Buffer size can be anything between 1MB and 512MB (it will be a multiple of 1MB). B.) Irrespective of the underlying firmware's crash dump capability, the driver will allocate a DMA buffer at start of day for each MR controller. Let’s call this buffer the “DMA Crash Buffer”. For this feature, the size of the DMA Crash Buffer will be 1MB (we will not gain much even if the DMA buffer size is increased). C.) The driver will now read the controller info by sending the existing dcmd “MR_DCMD_CTRL_GET_INFO”. The driver extracts the information from the ctrl info provided by firmware and figures out whether the firmware supports the crash dump feature. The driver will enable the crash dump feature only if “firmware supports crash dump” + “driver was able to create the DMA Crash Buffer”. If either condition is not met, the crash dump feature is disabled in the driver. Firmware will enable the crash dump feature only if “Driver Send DCMD- MR_DCMD_SET_CRASH_BUF_PARA with MR_CRASH_BUF_TURN_ON”. A helper application/script should use the sysfs parameters fw_crash_xxx to actually copy the data from host memory to the filesystem.
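To make that fw_crash_xxx flow concrete, the following is a hedged user-space sketch, not a shipped tool: the /sys/class/scsi_host/hostN/ location, the 4096-byte page size, the output file name and the write_attr() helper are assumptions for illustration; only the attribute semantics come from this patch (fw_crash_buffer_size reports the dump size in pages, writing a byte offset to fw_crash_buffer selects where the next read starts, each read returns up to roughly one page of dump data, and writing 2/3 to fw_crash_state marks the copy as in progress/finished so the driver can free the host crash buffer).

#include <stdio.h>

#define ASSUMED_PAGE_SIZE 4096UL	/* assumption; see the page_size attribute */

static void write_attr(const char *host, const char *attr, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/scsi_host/%s/%s", host, attr);
	f = fopen(path, "w");
	if (f) {
		fputs(val, f);
		fclose(f);
	}
}

int main(int argc, char **argv)
{
	const char *host = argc > 1 ? argv[1] : "host0";	/* assumed host */
	char path[256], chunk[ASSUMED_PAGE_SIZE], val[32];
	unsigned long pages = 0, offset = 0, total;
	FILE *f, *out;
	size_t n;

	/* dump size is reported as a number of pages */
	snprintf(path, sizeof(path),
		 "/sys/class/scsi_host/%s/fw_crash_buffer_size", host);
	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fscanf(f, "%lu", &pages) != 1 || !pages) {
		fclose(f);
		return 1;
	}
	fclose(f);
	total = pages * ASSUMED_PAGE_SIZE;

	write_attr(host, "fw_crash_state", "2");	/* COPYING */

	out = fopen("fw_crash_dump.bin", "w");
	if (!out)
		return 1;
	snprintf(path, sizeof(path),
		 "/sys/class/scsi_host/%s/fw_crash_buffer", host);
	while (offset < total) {
		snprintf(val, sizeof(val), "%lu", offset);
		write_attr(host, "fw_crash_buffer", val);	/* select offset */

		f = fopen(path, "r");
		if (!f)
			break;
		n = fread(chunk, 1, sizeof(chunk), f);	/* up to ~1 page */
		fclose(f);
		if (!n)
			break;
		fwrite(chunk, 1, n, out);
		offset += n;	/* advance by what was actually returned */
	}
	fclose(out);

	write_attr(host, "fw_crash_state", "3");	/* COPIED: free buffers */
	return 0;
}

A real helper would add error handling and read the page_size attribute instead of assuming 4096 bytes.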
Signed-off-by: Sumit Saxena Signed-off-by: Kashyap Desai Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas.h | 58 +++++- drivers/scsi/megaraid/megaraid_sas_base.c | 294 +++++++++++++++++++++++++++- drivers/scsi/megaraid/megaraid_sas_fusion.c | 172 +++++++++++++++- 3 files changed, 519 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index bc7adcf537f2..e0f03e2f6ddf 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -105,6 +105,9 @@ #define MFI_STATE_READY 0xB0000000 #define MFI_STATE_OPERATIONAL 0xC0000000 #define MFI_STATE_FAULT 0xF0000000 +#define MFI_STATE_FORCE_OCR 0x00000080 +#define MFI_STATE_DMADONE 0x00000008 +#define MFI_STATE_CRASH_DUMP_DONE 0x00000004 #define MFI_RESET_REQUIRED 0x00000001 #define MFI_RESET_ADAPTER 0x00000002 #define MEGAMFI_FRAME_SIZE 64 @@ -191,6 +194,9 @@ #define MR_DCMD_CLUSTER_RESET_LD 0x08010200 #define MR_DCMD_PD_LIST_QUERY 0x02010100 +#define MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS 0x01190100 +#define MR_DRIVER_SET_APP_CRASHDUMP_MODE (0xF0010000 | 0x0600) + /* * Global functions */ @@ -263,6 +269,25 @@ enum MFI_STAT { MFI_STAT_INVALID_STATUS = 0xFF }; +/* + * Crash dump related defines + */ +#define MAX_CRASH_DUMP_SIZE 512 +#define CRASH_DMA_BUF_SIZE (1024 * 1024) + +enum MR_FW_CRASH_DUMP_STATE { + UNAVAILABLE = 0, + AVAILABLE = 1, + COPYING = 2, + COPIED = 3, + COPY_ERROR = 4, +}; + +enum _MR_CRASH_BUF_STATUS { + MR_CRASH_BUF_TURN_OFF = 0, + MR_CRASH_BUF_TURN_ON = 1, +}; + /* * Number of mailbox bytes in DCMD message frame */ @@ -933,7 +958,19 @@ struct megasas_ctrl_info { u8 reserved; /*0x7E7*/ } iov; - u8 pad[0x800-0x7E8]; /*0x7E8 pad to 2k */ + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved:25; + u32 supportCrashDump:1; + u32 reserved1:6; +#else + u32 reserved1:6; + u32 supportCrashDump:1; + u32 reserved:25; +#endif + } adapterOperations3; + + u8 pad[0x800-0x7EC]; } __packed; /* @@ -1559,6 +1596,20 @@ struct megasas_instance { u32 *reply_queue; dma_addr_t reply_queue_h; + u32 *crash_dump_buf; + dma_addr_t crash_dump_h; + void *crash_buf[MAX_CRASH_DUMP_SIZE]; + u32 crash_buf_pages; + unsigned int fw_crash_buffer_size; + unsigned int fw_crash_state; + unsigned int fw_crash_buffer_offset; + u32 drv_buf_index; + u32 drv_buf_alloc; + u32 crash_dump_fw_support; + u32 crash_dump_drv_support; + u32 crash_dump_app_support; + spinlock_t crashdump_lock; + struct megasas_register_set __iomem *reg_set; u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; @@ -1606,6 +1657,7 @@ struct megasas_instance { struct megasas_instance_template *instancet; struct tasklet_struct isr_tasklet; struct work_struct work_init; + struct work_struct crash_init; u8 flag; u8 unload; @@ -1830,4 +1882,8 @@ u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map); u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map); u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map); +int megasas_set_crash_dump_params(struct megasas_instance *instance, + u8 crash_buf_state); +void megasas_free_host_crash_buffer(struct megasas_instance *instance); +void megasas_fusion_crash_dump_wq(struct work_struct *work); #endif /*LSI_MEGARAID_SAS_H */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index a894f13c246f..ff96e3c58fbf 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ 
b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2560,6 +2560,152 @@ static int megasas_change_queue_depth(struct scsi_device *sdev, return queue_depth; } +static ssize_t +megasas_fw_crash_buffer_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = + (struct megasas_instance *) shost->hostdata; + int val = 0; + unsigned long flags; + + if (kstrtoint(buf, 0, &val) != 0) + return -EINVAL; + + spin_lock_irqsave(&instance->crashdump_lock, flags); + instance->fw_crash_buffer_offset = val; + spin_unlock_irqrestore(&instance->crashdump_lock, flags); + return strlen(buf); +} + +static ssize_t +megasas_fw_crash_buffer_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = + (struct megasas_instance *) shost->hostdata; + u32 size; + unsigned long buff_addr; + unsigned long dmachunk = CRASH_DMA_BUF_SIZE; + unsigned long src_addr; + unsigned long flags; + u32 buff_offset; + + spin_lock_irqsave(&instance->crashdump_lock, flags); + buff_offset = instance->fw_crash_buffer_offset; + if (!instance->crash_dump_buf && + !((instance->fw_crash_state == AVAILABLE) || + (instance->fw_crash_state == COPYING))) { + dev_err(&instance->pdev->dev, + "Firmware crash dump is not available\n"); + spin_unlock_irqrestore(&instance->crashdump_lock, flags); + return -EINVAL; + } + + buff_addr = (unsigned long) buf; + + if (buff_offset > + (instance->fw_crash_buffer_size * dmachunk)) { + dev_err(&instance->pdev->dev, + "Firmware crash dump offset is out of range\n"); + spin_unlock_irqrestore(&instance->crashdump_lock, flags); + return 0; + } + + size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; + size = (size >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : size; + + src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + + (buff_offset % dmachunk); + memcpy(buf, (void *)src_addr, size); + spin_unlock_irqrestore(&instance->crashdump_lock, flags); + + return size; +} + +static ssize_t +megasas_fw_crash_buffer_size_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = + (struct megasas_instance *) shost->hostdata; + + return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long) + ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE); +} + +static ssize_t +megasas_fw_crash_state_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = + (struct megasas_instance *) shost->hostdata; + int val = 0; + unsigned long flags; + + if (kstrtoint(buf, 0, &val) != 0) + return -EINVAL; + + if ((val <= AVAILABLE || val > COPY_ERROR)) { + dev_err(&instance->pdev->dev, "application updates invalid " + "firmware crash state\n"); + return -EINVAL; + } + + instance->fw_crash_state = val; + + if ((val == COPIED) || (val == COPY_ERROR)) { + spin_lock_irqsave(&instance->crashdump_lock, flags); + megasas_free_host_crash_buffer(instance); + spin_unlock_irqrestore(&instance->crashdump_lock, flags); + if (val == COPY_ERROR) + dev_info(&instance->pdev->dev, "application failed to " + "copy Firmware crash dump\n"); + else + dev_info(&instance->pdev->dev, "Firmware crash dump " + "copied successfully\n"); + } + return strlen(buf); +} + +static ssize_t +megasas_fw_crash_state_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = + (struct megasas_instance *) shost->hostdata; + return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state); +} + +static ssize_t +megasas_page_size_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1); +} + +static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR, + megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store); +static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO, + megasas_fw_crash_buffer_size_show, NULL); +static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR, + megasas_fw_crash_state_show, megasas_fw_crash_state_store); +static DEVICE_ATTR(page_size, S_IRUGO, + megasas_page_size_show, NULL); + +struct device_attribute *megaraid_host_attrs[] = { + &dev_attr_fw_crash_buffer_size, + &dev_attr_fw_crash_buffer, + &dev_attr_fw_crash_state, + &dev_attr_page_size, + NULL, +}; + /* * Scsi host template for megaraid_sas driver */ @@ -2575,6 +2721,7 @@ static struct scsi_host_template megasas_template = { .eh_bus_reset_handler = megasas_reset_bus_host, .eh_host_reset_handler = megasas_reset_bus_host, .eh_timed_out = megasas_reset_timer, + .shost_attrs = megaraid_host_attrs, .bios_param = megasas_bios_param, .use_clustering = ENABLE_CLUSTERING, .change_queue_depth = megasas_change_queue_depth, @@ -3887,6 +4034,59 @@ megasas_get_ctrl_info(struct megasas_instance *instance, return ret; } +/* + * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer + * to firmware + * + * @instance: Adapter soft state + * @crash_buf_state - tell FW to turn ON/OFF crash dump feature + MR_CRASH_BUF_TURN_OFF = 0 + MR_CRASH_BUF_TURN_ON = 1 + * @return 0 on success non-zero on failure. 
+ * Issues an internal command (DCMD) to set parameters for crash dump feature. + * Driver will send address of crash dump DMA buffer and set mbox to tell FW + * that driver supports crash dump feature. This DCMD will be sent only if + * crash dump feature is supported by the FW. + * + */ +int megasas_set_crash_dump_params(struct megasas_instance *instance, + u8 crash_buf_state) +{ + int ret = 0; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_err(&instance->pdev->dev, "Failed to get a free cmd\n"); + return -ENOMEM; + } + + + dcmd = &cmd->frame->dcmd; + + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + dcmd->mbox.b[0] = crash_buf_state; + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE); + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE); + + if (!megasas_issue_polled(instance, cmd)) + ret = 0; + else + ret = -1; + megasas_return_cmd(instance, cmd); + return ret; +} + /** * megasas_issue_init_mfi - Initializes the FW * @instance: Adapter soft state @@ -4272,6 +4472,27 @@ static int megasas_init_fw(struct megasas_instance *instance) printk(KERN_WARNING "megaraid_sas: I am VF " "requestorId %d\n", instance->requestorId); } + + le32_to_cpus((u32 *)&ctrl_info->adapterOperations3); + instance->crash_dump_fw_support = + ctrl_info->adapterOperations3.supportCrashDump; + instance->crash_dump_drv_support = + (instance->crash_dump_fw_support && + instance->crash_dump_buf); + if (instance->crash_dump_drv_support) { + dev_info(&instance->pdev->dev, "Firmware Crash dump " + "feature is supported\n"); + megasas_set_crash_dump_params(instance, + MR_CRASH_BUF_TURN_OFF); + + } else { + if (instance->crash_dump_buf) + pci_free_consistent(instance->pdev, + CRASH_DMA_BUF_SIZE, + instance->crash_dump_buf, + instance->crash_dump_h); + instance->crash_dump_buf = NULL; + } } instance->max_sectors_per_req = instance->max_num_sge * PAGE_SIZE / 512; @@ -4791,6 +5012,23 @@ static int megasas_probe_one(struct pci_dev *pdev, break; } + /* Crash dump feature related initialisation*/ + instance->drv_buf_index = 0; + instance->drv_buf_alloc = 0; + instance->crash_dump_fw_support = 0; + instance->crash_dump_app_support = 0; + instance->fw_crash_state = UNAVAILABLE; + spin_lock_init(&instance->crashdump_lock); + instance->crash_dump_buf = NULL; + + if (!reset_devices) + instance->crash_dump_buf = pci_alloc_consistent(pdev, + CRASH_DMA_BUF_SIZE, + &instance->crash_dump_h); + if (!instance->crash_dump_buf) + dev_err(&instance->pdev->dev, "Can't allocate Firmware " + "crash dump DMA buffer\n"); + megasas_poll_wait_aen = 0; instance->flag_ieee = 0; instance->ev = NULL; @@ -4852,9 +5090,10 @@ static int megasas_probe_one(struct pci_dev *pdev, if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) || (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || - (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) + (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); - else + INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq); + } else INIT_WORK(&instance->work_init, process_fw_state_change_wq); /* @@ -5342,6 +5581,8 @@ static 
void megasas_detach_one(struct pci_dev *pdev) if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); + if (instance->fw_crash_state != UNAVAILABLE) + megasas_free_host_crash_buffer(instance); scsi_remove_host(instance->host); megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); @@ -5432,6 +5673,10 @@ static void megasas_detach_one(struct pci_dev *pdev) instance->hb_host_mem, instance->hb_host_mem_h); + if (instance->crash_dump_buf) + pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE, + instance->crash_dump_buf, instance->crash_dump_h); + scsi_host_put(host); pci_disable_device(pdev); @@ -5523,6 +5768,45 @@ static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait) return mask; } +/* + * megasas_set_crash_dump_params_ioctl: + * Send CRASH_DUMP_MODE DCMD to all controllers + * @cmd: MFI command frame + */ + +static int megasas_set_crash_dump_params_ioctl( + struct megasas_cmd *cmd) +{ + struct megasas_instance *local_instance; + int i, error = 0; + int crash_support; + + crash_support = cmd->frame->dcmd.mbox.w[0]; + + for (i = 0; i < megasas_mgmt_info.max_index; i++) { + local_instance = megasas_mgmt_info.instance[i]; + if (local_instance && local_instance->crash_dump_drv_support) { + if ((local_instance->adprecovery == + MEGASAS_HBA_OPERATIONAL) && + !megasas_set_crash_dump_params(local_instance, + crash_support)) { + local_instance->crash_dump_app_support = + crash_support; + dev_info(&local_instance->pdev->dev, + "Application firmware crash " + "dump mode set success\n"); + error = 0; + } else { + dev_info(&local_instance->pdev->dev, + "Application firmware crash " + "dump mode set failed\n"); + error = -1; + } + } + } + return error; +} + /** * megasas_mgmt_fw_ioctl - Issues management ioctls to FW * @instance: Adapter soft state @@ -5569,6 +5853,12 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, MFI_FRAME_SGL64 | MFI_FRAME_SENSE64)); + if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { + error = megasas_set_crash_dump_params_ioctl(cmd); + megasas_return_cmd(instance, cmd); + return error; + } + /* * The management interface between applications and the fw uses * MFI frames. 
E.g, RAID configuration changes, LD property changes diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 155b8b1a8f4b..913e9fa8fc15 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -91,6 +91,8 @@ void megasas_start_timer(struct megasas_instance *instance, extern struct megasas_mgmt_info megasas_mgmt_info; extern int resetwaittime; + + /** * megasas_enable_intr_fusion - Enables interrupts * @regs: MFI register set @@ -2055,7 +2057,7 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp) { struct megasas_irq_context *irq_context = devp; struct megasas_instance *instance = irq_context->instance; - u32 mfiStatus, fw_state; + u32 mfiStatus, fw_state, dma_state; if (instance->mask_interrupts) return IRQ_NONE; @@ -2077,7 +2079,16 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp) /* If we didn't complete any commands, check for FW fault */ fw_state = instance->instancet->read_fw_status_reg( instance->reg_set) & MFI_STATE_MASK; - if (fw_state == MFI_STATE_FAULT) { + dma_state = instance->instancet->read_fw_status_reg + (instance->reg_set) & MFI_STATE_DMADONE; + if (instance->crash_dump_drv_support && + instance->crash_dump_app_support) { + /* Start collecting crash, if DMA bit is done */ + if ((fw_state == MFI_STATE_FAULT) && dma_state) + schedule_work(&instance->crash_init); + else if (fw_state == MFI_STATE_FAULT) + schedule_work(&instance->work_init); + } else if (fw_state == MFI_STATE_FAULT) { printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt" "for scsi%d\n", instance->host->host_no); schedule_work(&instance->work_init); @@ -2229,6 +2240,49 @@ megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs) return readl(&(regs)->outbound_scratch_pad); } +/** + * megasas_alloc_host_crash_buffer - Host buffers for Crash dump collection from Firmware + * @instance: Controller's soft instance + * return: Number of allocated host crash buffers + */ +static void +megasas_alloc_host_crash_buffer(struct megasas_instance *instance) +{ + unsigned int i; + + instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE); + for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) { + instance->crash_buf[i] = (void *)__get_free_pages(GFP_KERNEL, + instance->crash_buf_pages); + if (!instance->crash_buf[i]) { + dev_info(&instance->pdev->dev, "Firmware crash dump " + "memory allocation failed at index %d\n", i); + break; + } + } + instance->drv_buf_alloc = i; +} + +/** + * megasas_free_host_crash_buffer - Host buffers for Crash dump collection from Firmware + * @instance: Controller's soft instance + */ +void +megasas_free_host_crash_buffer(struct megasas_instance *instance) +{ + unsigned int i +; + for (i = 0; i < instance->drv_buf_alloc; i++) { + if (instance->crash_buf[i]) + free_pages((ulong)instance->crash_buf[i], + instance->crash_buf_pages); + } + instance->drv_buf_index = 0; + instance->drv_buf_alloc = 0; + instance->fw_crash_state = UNAVAILABLE; + instance->fw_crash_buffer_size = 0; +} + /** * megasas_adp_reset_fusion - For controller reset * @regs: MFI register set @@ -2372,6 +2426,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) struct megasas_cmd *cmd_mfi; union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; u32 host_diag, abs_state, status_reg, reset_adapter; + u32 io_timeout_in_crash_mode = 0; instance = (struct megasas_instance *)shost->hostdata; fusion = instance->ctrl_context; @@ -2385,6 +2440,42 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int 
iotimeout) mutex_unlock(&instance->reset_mutex); return FAILED; } + status_reg = instance->instancet->read_fw_status_reg(instance->reg_set); + abs_state = status_reg & MFI_STATE_MASK; + + /* IO timeout detected, forcibly put FW in FAULT state */ + if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf && + instance->crash_dump_app_support && iotimeout) { + dev_info(&instance->pdev->dev, "IO timeout is detected, " + "forcibly FAULT Firmware\n"); + instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; + status_reg = readl(&instance->reg_set->doorbell); + writel(status_reg | MFI_STATE_FORCE_OCR, + &instance->reg_set->doorbell); + readl(&instance->reg_set->doorbell); + mutex_unlock(&instance->reset_mutex); + do { + ssleep(3); + io_timeout_in_crash_mode++; + dev_dbg(&instance->pdev->dev, "waiting for [%d] " + "seconds for crash dump collection and OCR " + "to be done\n", (io_timeout_in_crash_mode * 3)); + } while ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) && + (io_timeout_in_crash_mode < 80)); + + if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) { + dev_info(&instance->pdev->dev, "OCR done for IO " + "timeout case\n"); + retval = SUCCESS; + } else { + dev_info(&instance->pdev->dev, "Controller is not " + "operational after 240 seconds wait for IO " + "timeout case in FW crash dump mode\n do " + "OCR/kill adapter\n"); + retval = megasas_reset_fusion(shost, 0); + } + return retval; + } if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); @@ -2651,6 +2742,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) printk(KERN_WARNING "megaraid_sas: Reset " "successful for scsi%d.\n", instance->host->host_no); + + if (instance->crash_dump_drv_support) { + if (instance->crash_dump_app_support) + megasas_set_crash_dump_params(instance, + MR_CRASH_BUF_TURN_ON); + else + megasas_set_crash_dump_params(instance, + MR_CRASH_BUF_TURN_OFF); + } retval = SUCCESS; goto out; } @@ -2679,6 +2779,74 @@ out: return retval; } +/* Fusion Crash dump collection work queue */ +void megasas_fusion_crash_dump_wq(struct work_struct *work) +{ + struct megasas_instance *instance = + container_of(work, struct megasas_instance, crash_init); + u32 status_reg; + u8 partial_copy = 0; + + + status_reg = instance->instancet->read_fw_status_reg(instance->reg_set); + + /* + * Allocate host crash buffers to copy data from 1 MB DMA crash buffer + * to host crash buffers + */ + if (instance->drv_buf_index == 0) { + /* Buffer is already allocated for old Crash dump. + * Do OCR and do not wait for crash dump collection + */ + if (instance->drv_buf_alloc) { + dev_info(&instance->pdev->dev, "earlier crash dump is " + "not yet copied by application, ignoring this " + "crash dump and initiating OCR\n"); + status_reg |= MFI_STATE_CRASH_DUMP_DONE; + writel(status_reg, + &instance->reg_set->outbound_scratch_pad); + readl(&instance->reg_set->outbound_scratch_pad); + return; + } + megasas_alloc_host_crash_buffer(instance); + dev_info(&instance->pdev->dev, "Number of host crash buffers " + "allocated: %d\n", instance->drv_buf_alloc); + } + + /* + * Driver has allocated max buffers, which can be allocated + * and FW has more crash dump data, then driver will + * ignore the data. 
+ */ + if (instance->drv_buf_index >= (instance->drv_buf_alloc)) { + dev_info(&instance->pdev->dev, "Driver is done copying " + "the buffer: %d\n", instance->drv_buf_alloc); + status_reg |= MFI_STATE_CRASH_DUMP_DONE; + partial_copy = 1; + } else { + memcpy(instance->crash_buf[instance->drv_buf_index], + instance->crash_dump_buf, CRASH_DMA_BUF_SIZE); + instance->drv_buf_index++; + status_reg &= ~MFI_STATE_DMADONE; + } + + if (status_reg & MFI_STATE_CRASH_DUMP_DONE) { + dev_info(&instance->pdev->dev, "Crash Dump is available,number " + "of copied buffers: %d\n", instance->drv_buf_index); + instance->fw_crash_buffer_size = instance->drv_buf_index; + instance->fw_crash_state = AVAILABLE; + instance->drv_buf_index = 0; + writel(status_reg, &instance->reg_set->outbound_scratch_pad); + readl(&instance->reg_set->outbound_scratch_pad); + if (!partial_copy) + megasas_reset_fusion(instance->host, 0); + } else { + writel(status_reg, &instance->reg_set->outbound_scratch_pad); + readl(&instance->reg_set->outbound_scratch_pad); + } +} + + /* Fusion OCR work queue */ void megasas_fusion_ocr_wq(struct work_struct *work) { -- cgit From 51087a8617fef1fb15e5b7a8805cfbab3583944d Mon Sep 17 00:00:00 2001 From: "Sumit.Saxena@avagotech.com" Date: Fri, 12 Sep 2014 18:57:33 +0530 Subject: megaraid_sas : Extended VD support Resending the patch. Addressed the review comments from Tomas Henzl. reserved1 field(part of union) of Raid map struct was not required so it is removed. Current MegaRAID firmware and hence the driver only supported 64VDs. E.g: If the user wants to create more than 64VD on a controller, it is not possible on current firmware/driver. New feature and requirement to support upto 256VD, firmware/driver/apps need changes. In addition to that there must be a backward compatibility of the new driver with the older firmware and vice versa. RAID map is the interface between Driver and FW to fetch all required fields(attributes) for each Virtual Drives. In the earlier design driver was using the FW copy of RAID map where as in the new design the Driver will keep the RAID map copy of its own; on which it will operate for any raid map access in fast path. Local driver raid map copy will provide ease of access through out the code and provide generic interface for future FW raid map changes. For the backward compatibility driver will notify FW that it supports 256VD to the FW in driver capability field. Based on the controller properly returned by the FW, the Driver will know whether it supports 256VD or not and will copy the RAID map accordingly. At any given time, driver will always have old or new Raid map. So with this changes, driver can also work in host lock less mode. Please see next patch which enable host lock less mode for megaraid_sas driver. 
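To make the "driver will always have old or new RAID map" behaviour concrete, a minimal user-space sketch of the double-buffered driver map follows. The structure fields and helper names are simplified stand-ins rather than the real megaraid_sas definitions, and a real driver would additionally need the appropriate memory-ordering and locking guarantees around the publish step:

#include <stdio.h>
#include <string.h>

#define MAX_LOGICAL_DRIVES_EXT 256

/* Greatly simplified stand-in for the driver-local RAID map copy. */
struct drv_raid_map {
	unsigned int ld_count;
	unsigned char ld_tgt_id_to_ld[MAX_LOGICAL_DRIVES_EXT];
};

struct fusion_ctx {
	struct drv_raid_map ld_drv_map[2];	/* two complete local copies */
	unsigned int map_id;			/* bumped on every map refresh */
};

/* Fast-path readers always use the copy selected by the current map_id. */
static struct drv_raid_map *active_map(struct fusion_ctx *f)
{
	return &f->ld_drv_map[f->map_id & 1];
}

/*
 * A refresh rebuilds the inactive copy from the firmware map and only then
 * publishes it by bumping map_id, so readers see either the old map or the
 * new one, never a half-populated mix.
 */
static void refresh_map(struct fusion_ctx *f, const struct drv_raid_map *fw_map)
{
	struct drv_raid_map *next = &f->ld_drv_map[(f->map_id + 1) & 1];

	memcpy(next, fw_map, sizeof(*next));
	f->map_id++;
}

int main(void)
{
	struct fusion_ctx f = { .map_id = 0 };
	struct drv_raid_map fw_map = { .ld_count = 5 };

	refresh_map(&f, &fw_map);
	printf("active copy %u holds %u LDs\n",
	       f.map_id & 1, active_map(&f)->ld_count);
	return 0;
}
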
Signed-off-by: Sumit Saxena Signed-off-by: Kashyap Desai Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas.h | 73 +++++++--- drivers/scsi/megaraid/megaraid_sas_base.c | 205 ++++++++++++++++------------ drivers/scsi/megaraid/megaraid_sas_fp.c | 195 ++++++++++++++++++-------- drivers/scsi/megaraid/megaraid_sas_fusion.c | 118 ++++++++++++---- drivers/scsi/megaraid/megaraid_sas_fusion.h | 94 ++++++++++++- 5 files changed, 500 insertions(+), 185 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index e0f03e2f6ddf..5dedf09fc46d 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -390,7 +390,6 @@ enum MR_LD_QUERY_TYPE { #define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db #define MR_EVT_LD_OFFLINE 0x00fc #define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152 -#define MAX_LOGICAL_DRIVES 64 enum MR_PD_STATE { MR_PD_STATE_UNCONFIGURED_GOOD = 0x00, @@ -468,14 +467,14 @@ struct MR_LD_LIST { u8 state; u8 reserved[3]; u64 size; - } ldList[MAX_LOGICAL_DRIVES]; + } ldList[MAX_LOGICAL_DRIVES_EXT]; } __packed; struct MR_LD_TARGETID_LIST { u32 size; u32 count; u8 pad[3]; - u8 targetId[MAX_LOGICAL_DRIVES]; + u8 targetId[MAX_LOGICAL_DRIVES_EXT]; }; @@ -941,6 +940,15 @@ struct megasas_ctrl_info { * HA cluster information */ struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved:26; + u32 premiumFeatureMismatch:1; + u32 ctrlPropIncompatible:1; + u32 fwVersionMismatch:1; + u32 hwIncompatible:1; + u32 peerIsIncompatible:1; + u32 peerIsPresent:1; +#else u32 peerIsPresent:1; u32 peerIsIncompatible:1; u32 hwIncompatible:1; @@ -948,6 +956,7 @@ struct megasas_ctrl_info { u32 ctrlPropIncompatible:1; u32 premiumFeatureMismatch:1; u32 reserved:26; +#endif } cluster; char clusterId[16]; /*7D4h */ @@ -962,9 +971,17 @@ struct megasas_ctrl_info { #if defined(__BIG_ENDIAN_BITFIELD) u32 reserved:25; u32 supportCrashDump:1; - u32 reserved1:6; + u32 supportMaxExtLDs:1; + u32 supportT10RebuildAssist:1; + u32 supportDisableImmediateIO:1; + u32 supportThermalPollInterval:1; + u32 supportPersonalityChange:2; #else - u32 reserved1:6; + u32 supportPersonalityChange:2; + u32 supportThermalPollInterval:1; + u32 supportDisableImmediateIO:1; + u32 supportT10RebuildAssist:1; + u32 supportMaxExtLDs:1; u32 supportCrashDump:1; u32 reserved:25; #endif @@ -979,13 +996,12 @@ struct megasas_ctrl_info { * =============================== */ #define MEGASAS_MAX_PD_CHANNELS 2 -#define MEGASAS_MAX_LD_CHANNELS 1 +#define MEGASAS_MAX_LD_CHANNELS 2 #define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \ MEGASAS_MAX_LD_CHANNELS) #define MEGASAS_MAX_DEV_PER_CHANNEL 128 #define MEGASAS_DEFAULT_INIT_ID -1 #define MEGASAS_MAX_LUN 8 -#define MEGASAS_MAX_LD 64 #define MEGASAS_DEFAULT_CMD_PER_LUN 256 #define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \ MEGASAS_MAX_DEV_PER_CHANNEL) @@ -998,6 +1014,8 @@ struct megasas_ctrl_info { #define MEGASAS_FW_BUSY 1 +#define VD_EXT_DEBUG 0 + /* Frame Type */ #define IO_FRAME 0 #define PTHRU_FRAME 1 @@ -1170,13 +1188,17 @@ union megasas_sgl_frame { typedef union _MFI_CAPABILITIES { struct { #if defined(__BIG_ENDIAN_BITFIELD) - u32 reserved:30; + u32 reserved:28; + u32 support_max_255lds:1; + u32 reserved1:1; u32 support_additional_msix:1; u32 support_fp_remote_lun:1; #else u32 support_fp_remote_lun:1; u32 support_additional_msix:1; - u32 reserved:30; + u32 reserved1:1; + u32 support_max_255lds:1; + u32 reserved:28; #endif } mfi_capabilities; u32 reg; @@ -1665,6 +1687,14 @@ struct 
megasas_instance { u8 issuepend_done; u8 disableOnlineCtrlReset; u8 UnevenSpanSupport; + + u8 supportmax256vd; + u16 fw_supported_vd_count; + u16 fw_supported_pd_count; + + u16 drv_supported_vd_count; + u16 drv_supported_pd_count; + u8 adprecovery; unsigned long last_time; u32 mfiStatus; @@ -1674,6 +1704,8 @@ struct megasas_instance { /* Ptr to hba specific information */ void *ctrl_context; + u32 ctrl_context_pages; + struct megasas_ctrl_info *ctrl_info; unsigned int msix_vectors; struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES]; struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES]; @@ -1874,16 +1906,21 @@ u8 MR_BuildRaidContext(struct megasas_instance *instance, struct IO_REQUEST_INFO *io_info, struct RAID_CONTEXT *pRAID_Context, - struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN); -u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); -struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map); -u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map); -u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map); -u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map); -u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map); - + struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN); +u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map); +struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); +u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map); +u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map); +u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map); +u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); + +void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map, + struct LD_LOAD_BALANCE_INFO *lbInfo); +int megasas_get_ctrl_info(struct megasas_instance *instance, + struct megasas_ctrl_info *ctrl_info); int megasas_set_crash_dump_params(struct megasas_instance *instance, - u8 crash_buf_state); + u8 crash_buf_state); void megasas_free_host_crash_buffer(struct megasas_instance *instance); void megasas_fusion_crash_dump_wq(struct work_struct *work); + #endif /*LSI_MEGARAID_SAS_H */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index ff96e3c58fbf..c7ddb129349d 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1581,7 +1581,8 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd scmd->result = 0; if (MEGASAS_IS_LOGICAL(scmd) && - (scmd->device->id >= MEGASAS_MAX_LD || scmd->device->lun)) { + (scmd->device->id >= instance->fw_supported_vd_count || + scmd->device->lun)) { scmd->result = DID_BAD_TARGET << 16; goto out_done; } @@ -3846,6 +3847,8 @@ megasas_get_ld_list(struct megasas_instance *instance) memset(ci, 0, sizeof(*ci)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + if (instance->supportmax256vd) + dcmd->mbox.b[0] = 1; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; @@ -3867,8 +3870,8 @@ megasas_get_ld_list(struct megasas_instance *instance) /* the following function will get the instance PD LIST */ - if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) { - memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); + if ((ret == 0) && (ld_count <= instance->fw_supported_vd_count)) { + memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); for (ld_index = 0; ld_index < ld_count; ld_index++) { if (ci->ldList[ld_index].state != 0) { @@ -3931,6 +3934,8 @@ megasas_ld_list_query(struct 
megasas_instance *instance, u8 query_type) memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->mbox.b[0] = query_type; + if (instance->supportmax256vd) + dcmd->mbox.b[2] = 1; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; @@ -3952,7 +3957,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) tgtid_count = le32_to_cpu(ci->count); - if ((ret == 0) && (tgtid_count <= (MAX_LOGICAL_DRIVES))) { + if ((ret == 0) && (tgtid_count <= (instance->fw_supported_vd_count))) { memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); for (ld_index = 0; ld_index < tgtid_count; ld_index++) { ids = ci->targetId[ld_index]; @@ -3978,7 +3983,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) * This information is mainly used to find out the maximum IO transfer per * command supported by the FW. */ -static int +int megasas_get_ctrl_info(struct megasas_instance *instance, struct megasas_ctrl_info *ctrl_info) { @@ -4019,6 +4024,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance, dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info)); + dcmd->mbox.b[0] = 1; if (!megasas_issue_polled(instance, cmd)) { ret = 0; @@ -4217,6 +4223,13 @@ megasas_init_adapter_mfi(struct megasas_instance *instance) if (megasas_issue_init_mfi(instance)) goto fail_fw_init; + if (megasas_get_ctrl_info(instance, instance->ctrl_info)) { + dev_err(&instance->pdev->dev, "(%d): Could get controller info " + "Fail from %s %d\n", instance->unique_id, + __func__, __LINE__); + goto fail_fw_init; + } + instance->fw_support_ieee = 0; instance->fw_support_ieee = (instance->instancet->read_fw_status_reg(reg_set) & @@ -4255,7 +4268,7 @@ static int megasas_init_fw(struct megasas_instance *instance) u32 tmp_sectors, msix_enable, scratch_pad_2; resource_size_t base_addr; struct megasas_register_set __iomem *reg_set; - struct megasas_ctrl_info *ctrl_info; + struct megasas_ctrl_info *ctrl_info = NULL; unsigned long bar_list; int i, loop, fw_msix_count = 0; struct IOV_111 *iovPtr; @@ -4386,6 +4399,17 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->msix_vectors); } + instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info), + GFP_KERNEL); + if (instance->ctrl_info == NULL) + goto fail_init_adapter; + + /* + * Below are default value for legacy Firmware. + * non-fusion based controllers + */ + instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; + instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; /* Get operational params, sge flags, send init cmd to controller */ if (instance->instancet->init_adapter(instance)) goto fail_init_adapter; @@ -4408,8 +4432,6 @@ static int megasas_init_fw(struct megasas_instance *instance) MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) megasas_get_ld_list(instance); - ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); - /* * Compute the max allowed sectors per IO: The controller info has two * limits on max sectors. Driver should use the minimum of these two. @@ -4420,79 +4442,79 @@ static int megasas_init_fw(struct megasas_instance *instance) * to calculate max_sectors_1. So the number ended up as zero always. 
*/ tmp_sectors = 0; - if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) { - - max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * - le16_to_cpu(ctrl_info->max_strips_per_io); - max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); + ctrl_info = instance->ctrl_info; - tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2); + max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * + le16_to_cpu(ctrl_info->max_strips_per_io); + max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); - /*Check whether controller is iMR or MR */ - if (ctrl_info->memory_size) { - instance->is_imr = 0; - dev_info(&instance->pdev->dev, "Controller type: MR," - "Memory size is: %dMB\n", - le16_to_cpu(ctrl_info->memory_size)); - } else { - instance->is_imr = 1; - dev_info(&instance->pdev->dev, - "Controller type: iMR\n"); - } - /* OnOffProperties are converted into CPU arch*/ - le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); - instance->disableOnlineCtrlReset = - ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; - /* adapterOperations2 are converted into CPU arch*/ - le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); - instance->mpio = ctrl_info->adapterOperations2.mpio; - instance->UnevenSpanSupport = - ctrl_info->adapterOperations2.supportUnevenSpans; - if (instance->UnevenSpanSupport) { - struct fusion_context *fusion = instance->ctrl_context; - dev_info(&instance->pdev->dev, "FW supports: " - "UnevenSpanSupport=%x\n", instance->UnevenSpanSupport); - if (MR_ValidateMapInfo(instance)) - fusion->fast_path_io = 1; - else - fusion->fast_path_io = 0; + tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2); - } - if (ctrl_info->host_interface.SRIOV) { - if (!ctrl_info->adapterOperations2.activePassive) - instance->PlasmaFW111 = 1; - - if (!instance->PlasmaFW111) - instance->requestorId = - ctrl_info->iov.requestorId; - else { - iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET); - instance->requestorId = iovPtr->requestorId; - } - printk(KERN_WARNING "megaraid_sas: I am VF " - "requestorId %d\n", instance->requestorId); - } + /*Check whether controller is iMR or MR */ + if (ctrl_info->memory_size) { + instance->is_imr = 0; + dev_info(&instance->pdev->dev, "Controller type: MR," + "Memory size is: %dMB\n", + le16_to_cpu(ctrl_info->memory_size)); + } else { + instance->is_imr = 1; + dev_info(&instance->pdev->dev, + "Controller type: iMR\n"); + } + /* OnOffProperties are converted into CPU arch*/ + le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); + instance->disableOnlineCtrlReset = + ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; + /* adapterOperations2 are converted into CPU arch*/ + le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); + instance->mpio = ctrl_info->adapterOperations2.mpio; + instance->UnevenSpanSupport = + ctrl_info->adapterOperations2.supportUnevenSpans; + if (instance->UnevenSpanSupport) { + struct fusion_context *fusion = instance->ctrl_context; + + dev_info(&instance->pdev->dev, "FW supports: " + "UnevenSpanSupport=%x\n", instance->UnevenSpanSupport); + if (MR_ValidateMapInfo(instance)) + fusion->fast_path_io = 1; + else + fusion->fast_path_io = 0; - le32_to_cpus((u32 *)&ctrl_info->adapterOperations3); - instance->crash_dump_fw_support = - ctrl_info->adapterOperations3.supportCrashDump; - instance->crash_dump_drv_support = - (instance->crash_dump_fw_support && - instance->crash_dump_buf); - if (instance->crash_dump_drv_support) { - dev_info(&instance->pdev->dev, "Firmware Crash dump " - "feature is 
supported\n"); - megasas_set_crash_dump_params(instance, - MR_CRASH_BUF_TURN_OFF); + } + if (ctrl_info->host_interface.SRIOV) { + if (!ctrl_info->adapterOperations2.activePassive) + instance->PlasmaFW111 = 1; - } else { - if (instance->crash_dump_buf) - pci_free_consistent(instance->pdev, - CRASH_DMA_BUF_SIZE, - instance->crash_dump_buf, - instance->crash_dump_h); - instance->crash_dump_buf = NULL; + if (!instance->PlasmaFW111) + instance->requestorId = + ctrl_info->iov.requestorId; + else { + iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET); + instance->requestorId = iovPtr->requestorId; } + dev_warn(&instance->pdev->dev, "I am VF " + "requestorId %d\n", instance->requestorId); + } + + le32_to_cpus((u32 *)&ctrl_info->adapterOperations3); + instance->crash_dump_fw_support = + ctrl_info->adapterOperations3.supportCrashDump; + instance->crash_dump_drv_support = + (instance->crash_dump_fw_support && + instance->crash_dump_buf); + if (instance->crash_dump_drv_support) { + dev_info(&instance->pdev->dev, "Firmware Crash dump " + "feature is supported\n"); + megasas_set_crash_dump_params(instance, + MR_CRASH_BUF_TURN_OFF); + + } else { + if (instance->crash_dump_buf) + pci_free_consistent(instance->pdev, + CRASH_DMA_BUF_SIZE, + instance->crash_dump_buf, + instance->crash_dump_h); + instance->crash_dump_buf = NULL; } instance->max_sectors_per_req = instance->max_num_sge * PAGE_SIZE / 512; @@ -4540,6 +4562,8 @@ static int megasas_init_fw(struct megasas_instance *instance) fail_init_adapter: fail_ready_state: + kfree(instance->ctrl_info); + instance->ctrl_info = NULL; iounmap(instance->reg_set); fail_ioremap: @@ -4918,6 +4942,7 @@ static int megasas_probe_one(struct pci_dev *pdev, struct Scsi_Host *host; struct megasas_instance *instance; u16 control = 0; + struct fusion_context *fusion = NULL; /* Reset MSI-X in the kdump kernel */ if (reset_devices) { @@ -4978,10 +5003,10 @@ static int megasas_probe_one(struct pci_dev *pdev, case PCI_DEVICE_ID_LSI_INVADER: case PCI_DEVICE_ID_LSI_FURY: { - struct fusion_context *fusion; - - instance->ctrl_context = - kzalloc(sizeof(struct fusion_context), GFP_KERNEL); + instance->ctrl_context_pages = + get_order(sizeof(struct fusion_context)); + instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL, + instance->ctrl_context_pages); if (!instance->ctrl_context) { printk(KERN_DEBUG "megasas: Failed to allocate " "memory for Fusion context info\n"); @@ -4990,6 +5015,8 @@ static int megasas_probe_one(struct pci_dev *pdev, fusion = instance->ctrl_context; INIT_LIST_HEAD(&fusion->cmd_pool); spin_lock_init(&fusion->cmd_pool_lock); + memset(fusion->load_balance_info, 0, + sizeof(struct LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT); } break; default: /* For all other supported controllers */ @@ -5035,7 +5062,6 @@ static int megasas_probe_one(struct pci_dev *pdev, instance->issuepend_done = 1; instance->adprecovery = MEGASAS_HBA_OPERATIONAL; instance->is_imr = 0; - megasas_poll_wait_aen = 0; instance->evt_detail = pci_alloc_consistent(pdev, sizeof(struct @@ -5072,6 +5098,7 @@ static int megasas_probe_one(struct pci_dev *pdev, instance->host = host; instance->unique_id = pdev->bus->number << 8 | pdev->devfn; instance->init_id = MEGASAS_DEFAULT_INIT_ID; + instance->ctrl_info = NULL; if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { @@ -5632,14 +5659,18 @@ static void megasas_detach_one(struct pci_dev *pdev) case PCI_DEVICE_ID_LSI_INVADER: case PCI_DEVICE_ID_LSI_FURY: 
megasas_release_fusion(instance); - for (i = 0; i < 2 ; i++) + for (i = 0; i < 2 ; i++) { if (fusion->ld_map[i]) dma_free_coherent(&instance->pdev->dev, - fusion->map_sz, + fusion->max_map_sz, fusion->ld_map[i], - fusion-> - ld_map_phys[i]); - kfree(instance->ctrl_context); + fusion->ld_map_phys[i]); + if (fusion->ld_drv_map[i]) + free_pages((ulong)fusion->ld_drv_map[i], + fusion->drv_map_pages); + } + free_pages((ulong)instance->ctrl_context, + instance->ctrl_context_pages); break; default: megasas_release_mfi(instance); @@ -5652,6 +5683,8 @@ static void megasas_detach_one(struct pci_dev *pdev) break; } + kfree(instance->ctrl_info); + if (instance->evt_detail) pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), instance->evt_detail, instance->evt_detail_h); @@ -5762,8 +5795,10 @@ static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait) spin_lock_irqsave(&poll_aen_lock, flags); if (megasas_poll_wait_aen) mask = (POLLIN | POLLRDNORM); + else mask = 0; + megasas_poll_wait_aen = 0; spin_unlock_irqrestore(&poll_aen_lock, flags); return mask; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 081bfff12d00..c2eaf6ef3d11 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -66,16 +66,13 @@ #define SPAN_INVALID 0xff /* Prototypes */ -void mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map, - struct LD_LOAD_BALANCE_INFO *lbInfo); - -static void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, +static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo); static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info, - struct RAID_CONTEXT *pRAID_Context, struct MR_FW_RAID_MAP_ALL *map); + struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map); static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld, - u64 strip, struct MR_FW_RAID_MAP_ALL *map); + u64 strip, struct MR_DRV_RAID_MAP_ALL *map); u32 mega_mod64(u64 dividend, u32 divisor) { @@ -109,94 +106,183 @@ u64 mega_div64_32(uint64_t dividend, uint32_t divisor) return d; } -struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map) +struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map) { return &map->raidMap.ldSpanMap[ld].ldRaid; } static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld, - struct MR_FW_RAID_MAP_ALL + struct MR_DRV_RAID_MAP_ALL *map) { return &map->raidMap.ldSpanMap[ld].spanBlock[0]; } -static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map) +static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map) { return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]; } -u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map) +u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map) { return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]); } -u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map) +u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map) { return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef); } -u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map) +u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map) { return map->raidMap.devHndlInfo[pd].curDevHdl; } -u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map) +u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map) { return 
le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId); } -u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map) +u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map) { return map->raidMap.ldTgtIdToLd[ldTgtId]; } static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, - struct MR_FW_RAID_MAP_ALL *map) + struct MR_DRV_RAID_MAP_ALL *map) { return &map->raidMap.ldSpanMap[ld].spanBlock[span].span; } +/* + * This function will Populate Driver Map using firmware raid map + */ +void MR_PopulateDrvRaidMap(struct megasas_instance *instance) +{ + struct fusion_context *fusion = instance->ctrl_context; + struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL; + struct MR_FW_RAID_MAP *pFwRaidMap = NULL; + int i; + + + struct MR_DRV_RAID_MAP_ALL *drv_map = + fusion->ld_drv_map[(instance->map_id & 1)]; + struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap; + + if (instance->supportmax256vd) { + memcpy(fusion->ld_drv_map[instance->map_id & 1], + fusion->ld_map[instance->map_id & 1], + fusion->current_map_sz); + /* New Raid map will not set totalSize, so keep expected value + * for legacy code in ValidateMapInfo + */ + pDrvRaidMap->totalSize = sizeof(struct MR_FW_RAID_MAP_EXT); + } else { + fw_map_old = (struct MR_FW_RAID_MAP_ALL *) + fusion->ld_map[(instance->map_id & 1)]; + pFwRaidMap = &fw_map_old->raidMap; + +#if VD_EXT_DEBUG + for (i = 0; i < pFwRaidMap->ldCount; i++) { + dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x " + "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n", + instance->unique_id, i, + fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId, + fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum, + fw_map_old->raidMap.ldSpanMap[i].ldRaid.size); + } +#endif + + memset(drv_map, 0, fusion->drv_map_sz); + pDrvRaidMap->totalSize = pFwRaidMap->totalSize; + pDrvRaidMap->ldCount = pFwRaidMap->ldCount; + pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; + for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) + pDrvRaidMap->ldTgtIdToLd[i] = + (u8)pFwRaidMap->ldTgtIdToLd[i]; + for (i = 0; i < pDrvRaidMap->ldCount; i++) { + pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i]; +#if VD_EXT_DEBUG + dev_dbg(&instance->pdev->dev, + "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x " + "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x " + "size 0x%x\n", i, i, + pFwRaidMap->ldSpanMap[i].ldRaid.targetId, + pFwRaidMap->ldSpanMap[i].ldRaid.seqNum, + (u32)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize); + dev_dbg(&instance->pdev->dev, + "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x " + "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x " + "size 0x%x\n", i, i, + pDrvRaidMap->ldSpanMap[i].ldRaid.targetId, + pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum, + (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize); + dev_dbg(&instance->pdev->dev, "Driver raid map all %p " + "raid map %p LD RAID MAP %p/%p\n", drv_map, + pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid, + &pDrvRaidMap->ldSpanMap[i].ldRaid); +#endif + } + memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo, + sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS); + memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo, + sizeof(struct MR_DEV_HANDLE_INFO) * + MAX_RAIDMAP_PHYSICAL_DEVICES); + } +} + /* * This function will validate Map info data provided by FW */ u8 MR_ValidateMapInfo(struct megasas_instance *instance) { - struct fusion_context *fusion = instance->ctrl_context; - struct MR_FW_RAID_MAP_ALL *map = fusion->ld_map[(instance->map_id & 1)]; - struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info; - PLD_SPAN_INFO ldSpanInfo = 
fusion->log_to_span; - struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap; + struct fusion_context *fusion; + struct MR_DRV_RAID_MAP_ALL *drv_map; + struct MR_DRV_RAID_MAP *pDrvRaidMap; + struct LD_LOAD_BALANCE_INFO *lbInfo; + PLD_SPAN_INFO ldSpanInfo; struct MR_LD_RAID *raid; int ldCount, num_lds; u16 ld; + u32 expected_size; - if (le32_to_cpu(pFwRaidMap->totalSize) != - (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) + - (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) { - printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n", - (unsigned int)((sizeof(struct MR_FW_RAID_MAP) - - sizeof(struct MR_LD_SPAN_MAP)) + - (sizeof(struct MR_LD_SPAN_MAP) * - le32_to_cpu(pFwRaidMap->ldCount)))); - printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize " - ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP), - le32_to_cpu(pFwRaidMap->totalSize)); + MR_PopulateDrvRaidMap(instance); + + fusion = instance->ctrl_context; + drv_map = fusion->ld_drv_map[(instance->map_id & 1)]; + pDrvRaidMap = &drv_map->raidMap; + + lbInfo = fusion->load_balance_info; + ldSpanInfo = fusion->log_to_span; + + if (instance->supportmax256vd) + expected_size = sizeof(struct MR_FW_RAID_MAP_EXT); + else + expected_size = + (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) + + (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pDrvRaidMap->ldCount))); + + if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) { + dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n", + (unsigned int) expected_size); + dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n", + (unsigned int)sizeof(struct MR_LD_SPAN_MAP), + le32_to_cpu(pDrvRaidMap->totalSize)); return 0; } if (instance->UnevenSpanSupport) - mr_update_span_set(map, ldSpanInfo); + mr_update_span_set(drv_map, ldSpanInfo); - mr_update_load_balance_params(map, lbInfo); + mr_update_load_balance_params(drv_map, lbInfo); - num_lds = le32_to_cpu(map->raidMap.ldCount); + num_lds = le32_to_cpu(drv_map->raidMap.ldCount); /*Convert Raid capability values to CPU arch */ for (ldCount = 0; ldCount < num_lds; ldCount++) { - ld = MR_TargetIdToLdGet(ldCount, map); - raid = MR_LdRaidGet(ld, map); + ld = MR_TargetIdToLdGet(ldCount, drv_map); + raid = MR_LdRaidGet(ld, drv_map); le32_to_cpus((u32 *)&raid->capability); } @@ -204,7 +290,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) } u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk, - struct MR_FW_RAID_MAP_ALL *map) + struct MR_DRV_RAID_MAP_ALL *map) { struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map); struct MR_QUAD_ELEMENT *quad; @@ -246,7 +332,8 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk, * ldSpanInfo - ldSpanInfo per HBA instance */ #if SPAN_DEBUG -static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo) +static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map, + PLD_SPAN_INFO ldSpanInfo) { u8 span; @@ -257,9 +344,9 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo) int ldCount; u16 ld; - for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) { + for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { ld = MR_TargetIdToLdGet(ldCount, map); - if (ld >= MAX_LOGICAL_DRIVES) + if (ld >= MAX_LOGICAL_DRIVES_EXT) continue; raid = MR_LdRaidGet(ld, map); dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n", @@ -339,7 +426,7 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO 
ldSpanInfo) */ u32 mr_spanset_get_span_block(struct megasas_instance *instance, - u32 ld, u64 row, u64 *span_blk, struct MR_FW_RAID_MAP_ALL *map) + u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map) { struct fusion_context *fusion = instance->ctrl_context; struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); @@ -402,7 +489,7 @@ u32 mr_spanset_get_span_block(struct megasas_instance *instance, */ static u64 get_row_from_strip(struct megasas_instance *instance, - u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map) + u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map) { struct fusion_context *fusion = instance->ctrl_context; struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); @@ -471,7 +558,7 @@ static u64 get_row_from_strip(struct megasas_instance *instance, */ static u64 get_strip_from_row(struct megasas_instance *instance, - u32 ld, u64 row, struct MR_FW_RAID_MAP_ALL *map) + u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map) { struct fusion_context *fusion = instance->ctrl_context; struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); @@ -532,7 +619,7 @@ static u64 get_strip_from_row(struct megasas_instance *instance, */ static u32 get_arm_from_strip(struct megasas_instance *instance, - u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map) + u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map) { struct fusion_context *fusion = instance->ctrl_context; struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); @@ -580,7 +667,7 @@ static u32 get_arm_from_strip(struct megasas_instance *instance, /* This Function will return Phys arm */ u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe, - struct MR_FW_RAID_MAP_ALL *map) + struct MR_DRV_RAID_MAP_ALL *map) { struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); /* Need to check correct default value */ @@ -624,7 +711,7 @@ u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe, static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info, struct RAID_CONTEXT *pRAID_Context, - struct MR_FW_RAID_MAP_ALL *map) + struct MR_DRV_RAID_MAP_ALL *map) { struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); u32 pd, arRef; @@ -705,7 +792,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info, struct RAID_CONTEXT *pRAID_Context, - struct MR_FW_RAID_MAP_ALL *map) + struct MR_DRV_RAID_MAP_ALL *map) { struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); u32 pd, arRef; @@ -794,7 +881,7 @@ u8 MR_BuildRaidContext(struct megasas_instance *instance, struct IO_REQUEST_INFO *io_info, struct RAID_CONTEXT *pRAID_Context, - struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN) + struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN) { struct MR_LD_RAID *raid; u32 ld, stripSize, stripe_mask; @@ -1043,7 +1130,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, * ldSpanInfo - ldSpanInfo per HBA instance * */ -void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, +void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo) { u8 span, count; @@ -1056,9 +1143,9 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, u16 ld; - for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) { + for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { ld = MR_TargetIdToLdGet(ldCount, map); - if (ld >= MAX_LOGICAL_DRIVES) + if (ld >= MAX_LOGICAL_DRIVES_EXT) continue; raid = MR_LdRaidGet(ld, map); for (element = 0; element 
< MAX_QUAD_DEPTH; element++) { @@ -1153,16 +1240,16 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, } void -mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map, +mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map, struct LD_LOAD_BALANCE_INFO *lbInfo) { int ldCount; u16 ld; struct MR_LD_RAID *raid; - for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) { + for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { ld = MR_TargetIdToLdGet(ldCount, map); - if (ld >= MAX_LOGICAL_DRIVES) { + if (ld >= MAX_LOGICAL_DRIVES_EXT) { lbInfo[ldCount].loadBalanceFlag = 0; continue; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 913e9fa8fc15..ed24a848df5b 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -652,6 +652,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) /* driver supports HA / Remote LUN over Fast Path interface */ init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun = 1; + init_frame->driver_operations.mfi_capabilities.support_max_255lds + = 1; /* Convert capability to LE32 */ cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); @@ -711,6 +713,13 @@ fail_get_cmd: * Issues an internal command (DCMD) to get the FW's controller PD * list structure. This information is mainly used to find out SYSTEM * supported by the FW. + * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO + * dcmd.mbox.b[0] - number of LDs being sync'd + * dcmd.mbox.b[1] - 0 - complete command immediately. + * - 1 - pend till config change + * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP + * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and + * uses extended struct MR_FW_RAID_MAP_EXT */ static int megasas_get_ld_map_info(struct megasas_instance *instance) @@ -718,7 +727,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance) int ret = 0; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; - struct MR_FW_RAID_MAP_ALL *ci; + void *ci; dma_addr_t ci_h = 0; u32 size_map_info; struct fusion_context *fusion; @@ -739,10 +748,9 @@ megasas_get_ld_map_info(struct megasas_instance *instance) dcmd = &cmd->frame->dcmd; - size_map_info = sizeof(struct MR_FW_RAID_MAP) + - (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1)); + size_map_info = fusion->current_map_sz; - ci = fusion->ld_map[(instance->map_id & 1)]; + ci = (void *) fusion->ld_map[(instance->map_id & 1)]; ci_h = fusion->ld_map_phys[(instance->map_id & 1)]; if (!ci) { @@ -751,9 +759,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance) return -ENOMEM; } - memset(ci, 0, sizeof(*ci)); + memset(ci, 0, fusion->max_map_sz); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); - +#if VD_EXT_DEBUG + dev_dbg(&instance->pdev->dev, + "%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n", + __func__, cpu_to_le32(size_map_info)); +#endif dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; @@ -809,7 +821,7 @@ megasas_sync_map_info(struct megasas_instance *instance) u32 size_sync_info, num_lds; struct fusion_context *fusion; struct MR_LD_TARGET_SYNC *ci = NULL; - struct MR_FW_RAID_MAP_ALL *map; + struct MR_DRV_RAID_MAP_ALL *map; struct MR_LD_RAID *raid; struct MR_LD_TARGET_SYNC *ld_sync; dma_addr_t ci_h = 0; @@ -830,7 +842,7 @@ megasas_sync_map_info(struct megasas_instance *instance) return 1; } - map = fusion->ld_map[instance->map_id & 1]; + map = fusion->ld_drv_map[instance->map_id & 1]; num_lds = 
le32_to_cpu(map->raidMap.ldCount); @@ -842,7 +854,7 @@ megasas_sync_map_info(struct megasas_instance *instance) ci = (struct MR_LD_TARGET_SYNC *) fusion->ld_map[(instance->map_id - 1) & 1]; - memset(ci, 0, sizeof(struct MR_FW_RAID_MAP_ALL)); + memset(ci, 0, fusion->max_map_sz); ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1]; @@ -854,8 +866,7 @@ megasas_sync_map_info(struct megasas_instance *instance) ld_sync->seqNum = raid->seqNum; } - size_map_info = sizeof(struct MR_FW_RAID_MAP) + - (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1)); + size_map_info = fusion->current_map_sz; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; @@ -1018,17 +1029,75 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) goto fail_ioc_init; megasas_display_intel_branding(instance); + if (megasas_get_ctrl_info(instance, instance->ctrl_info)) { + dev_err(&instance->pdev->dev, + "Could not get controller info. Fail from %s %d\n", + __func__, __LINE__); + goto fail_ioc_init; + } + + instance->supportmax256vd = + instance->ctrl_info->adapterOperations3.supportMaxExtLDs; + /* Below is additional check to address future FW enhancement */ + if (instance->ctrl_info->max_lds > 64) + instance->supportmax256vd = 1; + instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS + * MEGASAS_MAX_DEV_PER_CHANNEL; + instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS + * MEGASAS_MAX_DEV_PER_CHANNEL; + if (instance->supportmax256vd) { + instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; + instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; + } else { + instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; + instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; + } + dev_info(&instance->pdev->dev, "Firmware supports %d VDs %d PDs\n" + "Driver supports %d VDs %d PDs\n", + instance->fw_supported_vd_count, + instance->fw_supported_pd_count, + instance->drv_supported_vd_count, + instance->drv_supported_pd_count); instance->flag_ieee = 1; + fusion->fast_path_io = 0; - fusion->map_sz = sizeof(struct MR_FW_RAID_MAP) + - (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1)); + fusion->old_map_sz = + sizeof(struct MR_FW_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) * + (instance->fw_supported_vd_count - 1)); + fusion->new_map_sz = + sizeof(struct MR_FW_RAID_MAP_EXT); + fusion->drv_map_sz = + sizeof(struct MR_DRV_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) * + (instance->drv_supported_vd_count - 1)); + + fusion->drv_map_pages = get_order(fusion->drv_map_sz); + for (i = 0; i < 2; i++) { + fusion->ld_map[i] = NULL; + fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL, + fusion->drv_map_pages); + if (!fusion->ld_drv_map[i]) { + dev_err(&instance->pdev->dev, "Could not allocate " + "memory for local map info for %d pages\n", + fusion->drv_map_pages); + if (i == 1) + free_pages((ulong)fusion->ld_drv_map[0], + fusion->drv_map_pages); + goto fail_ioc_init; + } + } + + fusion->max_map_sz = max(fusion->old_map_sz, fusion->new_map_sz); + + if (instance->supportmax256vd) + fusion->current_map_sz = fusion->new_map_sz; + else + fusion->current_map_sz = fusion->old_map_sz; - fusion->fast_path_io = 0; for (i = 0; i < 2; i++) { fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev, - fusion->map_sz, + fusion->max_map_sz, &fusion->ld_map_phys[i], GFP_KERNEL); if (!fusion->ld_map[i]) { @@ -1045,7 +1114,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) fail_map_info: if (i == 1) - dma_free_coherent(&instance->pdev->dev, fusion->map_sz, + dma_free_coherent(&instance->pdev->dev, 
fusion->max_map_sz, fusion->ld_map[0], fusion->ld_map_phys[0]); fail_ioc_init: megasas_free_cmds_fusion(instance); @@ -1232,7 +1301,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, void megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp, - struct MR_FW_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) + struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) { struct MR_LD_RAID *raid; u32 ld; @@ -1417,7 +1486,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; struct IO_REQUEST_INFO io_info; struct fusion_context *fusion; - struct MR_FW_RAID_MAP_ALL *local_map_ptr; + struct MR_DRV_RAID_MAP_ALL *local_map_ptr; u8 *raidLUN; device_id = MEGASAS_DEV_INDEX(instance, scp); @@ -1494,10 +1563,10 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) io_info.isRead = 1; - local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; + local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >= - MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io)) { + instance->fw_supported_vd_count) || (!fusion->fast_path_io)) { io_request->RaidContext.regLockFlags = 0; fp_possible = 0; } else { @@ -1587,7 +1656,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, u32 device_id; struct MPI2_RAID_SCSI_IO_REQUEST *io_request; u16 pd_index = 0; - struct MR_FW_RAID_MAP_ALL *local_map_ptr; + struct MR_DRV_RAID_MAP_ALL *local_map_ptr; struct fusion_context *fusion = instance->ctrl_context; u8 span, physArm; u16 devHandle; @@ -1599,7 +1668,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, device_id = MEGASAS_DEV_INDEX(instance, scmd); pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +scmd->device->id; - local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; + local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); @@ -1647,7 +1716,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, goto NonFastPath; ld = MR_TargetIdToLdGet(device_id, local_map_ptr); - if ((ld >= MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io)) + if ((ld >= instance->fw_supported_vd_count) || + (!fusion->fast_path_io)) goto NonFastPath; raid = MR_LdRaidGet(ld, local_map_ptr); @@ -2722,7 +2792,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) /* Reset load balance info */ memset(fusion->load_balance_info, 0, sizeof(struct LD_LOAD_BALANCE_INFO) - *MAX_LOGICAL_DRIVES); + *MAX_LOGICAL_DRIVES_EXT); if (!megasas_get_map_info(instance)) megasas_sync_map_info(instance); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index d660c4df4153..065c27031c9c 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -479,10 +479,13 @@ struct MPI2_IOC_INIT_REQUEST { #define MAX_ROW_SIZE 32 #define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE) #define MAX_LOGICAL_DRIVES 64 +#define MAX_LOGICAL_DRIVES_EXT 256 #define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES) #define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES) #define MAX_ARRAYS 128 #define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS) +#define MAX_ARRAYS_EXT 256 +#define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT) #define MAX_PHYSICAL_DEVICES 256 #define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES) #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 @@ -602,7 +605,6 
@@ struct MR_FW_RAID_MAP { u32 maxArrays; } validationInfo; u32 version[5]; - u32 reserved1[5]; }; u32 ldCount; @@ -714,6 +716,81 @@ struct MR_FW_RAID_MAP_ALL { struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1]; } __attribute__ ((packed)); +struct MR_DRV_RAID_MAP { + /* total size of this structure, including this field. + * This feild will be manupulated by driver for ext raid map, + * else pick the value from firmware raid map. + */ + u32 totalSize; + + union { + struct { + u32 maxLd; + u32 maxSpanDepth; + u32 maxRowSize; + u32 maxPdCount; + u32 maxArrays; + } validationInfo; + u32 version[5]; + }; + + /* timeout value used by driver in FP IOs*/ + u8 fpPdIoTimeoutSec; + u8 reserved2[7]; + + u16 ldCount; + u16 arCount; + u16 spanCount; + u16 reserve3; + + struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; + u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT]; + struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT]; + struct MR_LD_SPAN_MAP ldSpanMap[1]; + +}; + +/* Driver raid map size is same as raid map ext + * MR_DRV_RAID_MAP_ALL is created to sync with old raid. + * And it is mainly for code re-use purpose. + */ +struct MR_DRV_RAID_MAP_ALL { + + struct MR_DRV_RAID_MAP raidMap; + struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1]; +} __packed; + + + +struct MR_FW_RAID_MAP_EXT { + /* Not usred in new map */ + u32 reserved; + + union { + struct { + u32 maxLd; + u32 maxSpanDepth; + u32 maxRowSize; + u32 maxPdCount; + u32 maxArrays; + } validationInfo; + u32 version[5]; + }; + + u8 fpPdIoTimeoutSec; + u8 reserved2[7]; + + u16 ldCount; + u16 arCount; + u16 spanCount; + u16 reserve3; + + struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; + u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT]; + struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT]; + struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT]; +}; + struct fusion_context { struct megasas_cmd_fusion **cmd_list; struct list_head cmd_pool; @@ -750,10 +827,18 @@ struct fusion_context { struct MR_FW_RAID_MAP_ALL *ld_map[2]; dma_addr_t ld_map_phys[2]; - u32 map_sz; + /*Non dma-able memory. Driver local copy.*/ + struct MR_DRV_RAID_MAP_ALL *ld_drv_map[2]; + + u32 max_map_sz; + u32 current_map_sz; + u32 old_map_sz; + u32 new_map_sz; + u32 drv_map_sz; + u32 drv_map_pages; u8 fast_path_io; - struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES]; - LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES]; + struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT]; + LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT]; }; union desc_value { @@ -764,4 +849,5 @@ union desc_value { } u; }; + #endif /* _MEGARAID_SAS_FUSION_H_ */ -- cgit From fb1a24ff65cec6aecd28caff4e6565591182e381 Mon Sep 17 00:00:00 2001 From: "Sumit.Saxena@avagotech.com" Date: Fri, 12 Sep 2014 18:57:38 +0530 Subject: megaraid_sas : Host lock less mode to enabled asynchronous IO submission Resending the patch. Addressed the review comments from Tomas Henzl. Megaraid_sas driver can now work in host lock less mode. Remove host lock less as megaraid_sas driver will have safer access to raid map as described in earlier patch. We now keep Driver Raid map copy, which will make sure that driver will always have old or new map Driver raid map will be replaced safely in MR_PopulateDrvRaidMap(), so there is no issue even if IO is continue from the scsi mid layer. There is a plan to remove "host_lock" and "hba_lock" usage from megaraid_sas in future. 
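In terms of the entry point, the change boils down to the following prototype swap (both forms appear in the hunk below; they are repeated here only to make the diff easier to follow):

	/* Before: serialized through the DEF_SCSI_QCMD() wrapper, with the
	 * completion callback handed in by the wrapper. */
	static int megasas_queue_command_lck(struct scsi_cmnd *scmd,
					     void (*done)(struct scsi_cmnd *));
	static DEF_SCSI_QCMD(megasas_queue_command)

	/* After: called directly by the SCSI midlayer without the host lock;
	 * completion goes through scmd->scsi_done(scmd). */
	static int megasas_queue_command(struct Scsi_Host *shost,
					 struct scsi_cmnd *scmd);
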
Signed-off-by: Sumit Saxena Signed-off-by: Kashyap Desai Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas_base.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index c7ddb129349d..bc79a0568acf 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1536,7 +1536,7 @@ out_return_cmd: * @done: Callback entry point */ static int -megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) +megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) { struct megasas_instance *instance; unsigned long flags; @@ -1558,7 +1558,7 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd } else { spin_unlock_irqrestore(&instance->hba_lock, flags); scmd->result = DID_NO_CONNECT << 16; - done(scmd); + scmd->scsi_done(scmd); return 0; } } @@ -1566,7 +1566,7 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { spin_unlock_irqrestore(&instance->hba_lock, flags); scmd->result = DID_NO_CONNECT << 16; - done(scmd); + scmd->scsi_done(scmd); return 0; } @@ -1577,7 +1577,6 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd spin_unlock_irqrestore(&instance->hba_lock, flags); - scmd->scsi_done = done; scmd->result = 0; if (MEGASAS_IS_LOGICAL(scmd) && @@ -1607,12 +1606,10 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd return 0; out_done: - done(scmd); + scmd->scsi_done(scmd); return 0; } -static DEF_SCSI_QCMD(megasas_queue_command) - static struct megasas_instance *megasas_lookup_instance(u16 host_no) { int i; -- cgit From a5fd2858e21af03555751124347e509b19c7b6f3 Mon Sep 17 00:00:00 2001 From: "Sumit.Saxena@avagotech.com" Date: Fri, 12 Sep 2014 18:57:43 +0530 Subject: megaraid_sas : Round down max sge supported by controller to power of two Resending the patch. Addressed the review comments from Tomas Henzl. Round down the max sge to power of two. Earlier max sge limit is 70 SGE, which will allow block layer to send 280K IO frame. It is optimal to provide max IO size aligned to the smallest possible stripe size. E.a Consider that we have configured RAID Volumes which does not allow Fast Path across the stripe. Raid volume with stripe size = 256K, will have peformance hit if we get io frame of size 280K. Driver will not send IO frame large than stripe size to the Fast Path. Also, FW will convert 280K frame into 256K + 24K. This is an additional overhead. 
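Working the numbers from the example above (assuming 4K per scatter-gather element, which is what the 70-SGE/280K figure implies): rounding 70 down to the nearest power of two gives 64 SGEs, capping the largest I/O at 256K so it fits a 256K stripe exactly. The in-kernel helper is rounddown_pow_of_two() from <linux/log2.h>; a small userspace check of the arithmetic:

#include <stdio.h>

/* userspace stand-in for the kernel's rounddown_pow_of_two() */
static unsigned long demo_rounddown_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	unsigned long max_sge = 70;	/* figure quoted in the commit message */
	unsigned long rounded = demo_rounddown_pow_of_two(max_sge);

	/* prints: 70 SGEs (280K max I/O) -> 64 SGEs (256K max I/O) */
	printf("%lu SGEs (%luK max I/O) -> %lu SGEs (%luK max I/O)\n",
	       max_sge, max_sge * 4, rounded, rounded * 4);
	return 0;
}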
Signed-off-by: Sumit Saxena Signed-off-by: Kashyap Desai Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index ed24a848df5b..d35ac34ea624 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1001,8 +1001,8 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) fusion->max_sge_in_chain = MEGASAS_MAX_SZ_CHAIN_FRAME / sizeof(union MPI2_SGE_IO_UNION); - instance->max_num_sge = fusion->max_sge_in_main_msg + - fusion->max_sge_in_chain - 2; + instance->max_num_sge = rounddown_pow_of_two( + fusion->max_sge_in_main_msg + fusion->max_sge_in_chain - 2); /* Used for pass thru MFI frame (DCMD) */ fusion->chain_offset_mfi_pthru = -- cgit From ac95136ad40fc72c4842fee1ef002390ad96bfa0 Mon Sep 17 00:00:00 2001 From: "Sumit.Saxena@avagotech.com" Date: Fri, 12 Sep 2014 18:57:48 +0530 Subject: megaraid_sas : Add module parameter to disable IRQ-CPU affinity hint Resending the patch. Addressed the review comments from Tomas Henzl. For certain deployment, we may need to disable irq cpu affinity hint. This module parameter provides option for use to disable irq cpu affinity hint and allow irqbalancer to handle the rest. Signed-off-by: Sumit Saxena Signed-off-by: Kashyap Desai Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas_base.c | 60 +++++++++++++++++++------------ 1 file changed, 38 insertions(+), 22 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index bc79a0568acf..4c4c266e2388 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -89,6 +89,10 @@ module_param(resetwaittime, int, S_IRUGO); MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout " "before resetting adapter. 
Default: 180"); +int smp_affinity_enable = 1; +module_param(smp_affinity_enable, int, S_IRUGO); +MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disbale Default: enable(1)"); + MODULE_LICENSE("GPL"); MODULE_VERSION(MEGASAS_VERSION); MODULE_AUTHOR("megaraidlinux@lsi.com"); @@ -5162,8 +5166,9 @@ retry_irq_register: printk(KERN_DEBUG "megasas: Failed to " "register IRQ for vector %d.\n", i); for (j = 0; j < i; j++) { - irq_set_affinity_hint( - instance->msixentry[j].vector, NULL); + if (smp_affinity_enable) + irq_set_affinity_hint( + instance->msixentry[j].vector, NULL); free_irq( instance->msixentry[j].vector, &instance->irq_context[j]); @@ -5172,11 +5177,14 @@ retry_irq_register: instance->msix_vectors = 0; goto retry_irq_register; } - if (irq_set_affinity_hint(instance->msixentry[i].vector, - get_cpu_mask(cpu))) - dev_err(&instance->pdev->dev, "Error setting" - "affinity hint for cpu %d\n", cpu); - cpu = cpumask_next(cpu, cpu_online_mask); + if (smp_affinity_enable) { + if (irq_set_affinity_hint(instance->msixentry[i].vector, + get_cpu_mask(cpu))) + dev_err(&instance->pdev->dev, + "Error setting affinity hint " + "for cpu %d\n", cpu); + cpu = cpumask_next(cpu, cpu_online_mask); + } } } else { instance->irq_context[0].instance = instance; @@ -5235,8 +5243,9 @@ retry_irq_register: instance->instancet->disable_intr(instance); if (instance->msix_vectors) for (i = 0; i < instance->msix_vectors; i++) { - irq_set_affinity_hint( - instance->msixentry[i].vector, NULL); + if (smp_affinity_enable) + irq_set_affinity_hint( + instance->msixentry[i].vector, NULL); free_irq(instance->msixentry[i].vector, &instance->irq_context[i]); } @@ -5399,8 +5408,9 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) if (instance->msix_vectors) for (i = 0; i < instance->msix_vectors; i++) { - irq_set_affinity_hint( - instance->msixentry[i].vector, NULL); + if (smp_affinity_enable) + irq_set_affinity_hint( + instance->msixentry[i].vector, NULL); free_irq(instance->msixentry[i].vector, &instance->irq_context[i]); } @@ -5509,8 +5519,9 @@ megasas_resume(struct pci_dev *pdev) printk(KERN_DEBUG "megasas: Failed to " "register IRQ for vector %d.\n", i); for (j = 0; j < i; j++) { - irq_set_affinity_hint( - instance->msixentry[j].vector, NULL); + if (smp_affinity_enable) + irq_set_affinity_hint( + instance->msixentry[j].vector, NULL); free_irq( instance->msixentry[j].vector, &instance->irq_context[j]); @@ -5518,11 +5529,14 @@ megasas_resume(struct pci_dev *pdev) goto fail_irq; } - if (irq_set_affinity_hint(instance->msixentry[i].vector, - get_cpu_mask(cpu))) - dev_err(&instance->pdev->dev, "Error setting" - "affinity hint for cpu %d\n", cpu); - cpu = cpumask_next(cpu, cpu_online_mask); + if (smp_affinity_enable) { + if (irq_set_affinity_hint(instance->msixentry[i].vector, + get_cpu_mask(cpu))) + dev_err(&instance->pdev->dev, "Error " + "setting affinity hint for cpu " + "%d\n", cpu); + cpu = cpumask_next(cpu, cpu_online_mask); + } } } else { instance->irq_context[0].instance = instance; @@ -5640,8 +5654,9 @@ static void megasas_detach_one(struct pci_dev *pdev) if (instance->msix_vectors) for (i = 0; i < instance->msix_vectors; i++) { - irq_set_affinity_hint( - instance->msixentry[i].vector, NULL); + if (smp_affinity_enable) + irq_set_affinity_hint( + instance->msixentry[i].vector, NULL); free_irq(instance->msixentry[i].vector, &instance->irq_context[i]); } @@ -5729,8 +5744,9 @@ static void megasas_shutdown(struct pci_dev *pdev) instance->instancet->disable_intr(instance); if (instance->msix_vectors) 
for (i = 0; i < instance->msix_vectors; i++) { - irq_set_affinity_hint( - instance->msixentry[i].vector, NULL); + if (smp_affinity_enable) + irq_set_affinity_hint( + instance->msixentry[i].vector, NULL); free_irq(instance->msixentry[i].vector, &instance->irq_context[i]); } -- cgit From d2552ebe885314d3c8352e35f2fae2a7478ac778 Mon Sep 17 00:00:00 2001 From: "Sumit.Saxena@avagotech.com" Date: Fri, 12 Sep 2014 18:57:53 +0530 Subject: megaraid_sas : N-drive primary raid level 1 load balancing Resending the patch. Addressed the review comments from Tomas Henzl. Current driver does fast path read load balancing between arm and mirror disk for two Drive Raid-1 configuration only. Now, Driver support fast path read load balancing for all (any number of disk) Raid-1 configuration. Signed-off-by: Sumit Saxena Signed-off-by: Kashyap Desai Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas.h | 8 +- drivers/scsi/megaraid/megaraid_sas_fp.c | 134 ++++++++++++++++------------ drivers/scsi/megaraid/megaraid_sas_fusion.c | 14 ++- drivers/scsi/megaraid/megaraid_sas_fusion.h | 8 +- 4 files changed, 96 insertions(+), 68 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 5dedf09fc46d..156d4b97d2bd 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1188,7 +1188,8 @@ union megasas_sgl_frame { typedef union _MFI_CAPABILITIES { struct { #if defined(__BIG_ENDIAN_BITFIELD) - u32 reserved:28; + u32 reserved:27; + u32 support_ndrive_r1_lb:1; u32 support_max_255lds:1; u32 reserved1:1; u32 support_additional_msix:1; @@ -1198,7 +1199,8 @@ typedef union _MFI_CAPABILITIES { u32 support_additional_msix:1; u32 reserved1:1; u32 support_max_255lds:1; - u32 reserved:28; + u32 support_ndrive_r1_lb:1; + u32 reserved:27; #endif } mfi_capabilities; u32 reg; @@ -1914,6 +1916,8 @@ u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map); u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map); u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); +u16 get_updated_dev_handle(struct megasas_instance *instance, + struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info); void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map, struct LD_LOAD_BALANCE_INFO *lbInfo); int megasas_get_ctrl_info(struct megasas_instance *instance, diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index c2eaf6ef3d11..685e6f391fe4 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -55,6 +55,13 @@ #include "megaraid_sas.h" #include +#define LB_PENDING_CMDS_DEFAULT 4 +static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT; +module_param(lb_pending_cmds, int, S_IRUGO); +MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding " + "threshold. Valid Values are 1-128. Default: 4"); + + #define ABS_DIFF(a, b) (((a) > (b)) ? 
((a) - (b)) : ((b) - (a))) #define MR_LD_STATE_OPTIMAL 3 #define FALSE 0 @@ -769,6 +776,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + io_info->span_arm = pRAID_Context->spanArm; return retval; } @@ -865,6 +873,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + io_info->span_arm = pRAID_Context->spanArm; return retval; } @@ -1131,7 +1140,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, * */ void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map, - PLD_SPAN_INFO ldSpanInfo) + PLD_SPAN_INFO ldSpanInfo) { u8 span, count; u32 element, span_row_width; @@ -1239,90 +1248,105 @@ void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map, } -void -mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map, - struct LD_LOAD_BALANCE_INFO *lbInfo) +void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map, + struct LD_LOAD_BALANCE_INFO *lbInfo) { int ldCount; u16 ld; struct MR_LD_RAID *raid; + if (lb_pending_cmds > 128 || lb_pending_cmds < 1) + lb_pending_cmds = LB_PENDING_CMDS_DEFAULT; + for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { - ld = MR_TargetIdToLdGet(ldCount, map); + ld = MR_TargetIdToLdGet(ldCount, drv_map); if (ld >= MAX_LOGICAL_DRIVES_EXT) { lbInfo[ldCount].loadBalanceFlag = 0; continue; } - raid = MR_LdRaidGet(ld, map); - - /* Two drive Optimal RAID 1 */ - if ((raid->level == 1) && (raid->rowSize == 2) && - (raid->spanDepth == 1) && raid->ldState == - MR_LD_STATE_OPTIMAL) { - u32 pd, arRef; - - lbInfo[ldCount].loadBalanceFlag = 1; - - /* Get the array on which this span is present */ - arRef = MR_LdSpanArrayGet(ld, 0, map); - - /* Get the Pd */ - pd = MR_ArPdGet(arRef, 0, map); - /* Get dev handle from Pd */ - lbInfo[ldCount].raid1DevHandle[0] = - MR_PdDevHandleGet(pd, map); - /* Get the Pd */ - pd = MR_ArPdGet(arRef, 1, map); - - /* Get the dev handle from Pd */ - lbInfo[ldCount].raid1DevHandle[1] = - MR_PdDevHandleGet(pd, map); - } else + raid = MR_LdRaidGet(ld, drv_map); + if ((raid->level != 1) || + (raid->ldState != MR_LD_STATE_OPTIMAL)) { lbInfo[ldCount].loadBalanceFlag = 0; + continue; + } + lbInfo[ldCount].loadBalanceFlag = 1; } } -u8 megasas_get_best_arm(struct LD_LOAD_BALANCE_INFO *lbInfo, u8 arm, u64 block, - u32 count) +u8 megasas_get_best_arm_pd(struct megasas_instance *instance, + struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info) { - u16 pend0, pend1; + struct fusion_context *fusion; + struct MR_LD_RAID *raid; + struct MR_DRV_RAID_MAP_ALL *drv_map; + u16 pend0, pend1, ld; u64 diff0, diff1; - u8 bestArm; + u8 bestArm, pd0, pd1, span, arm; + u32 arRef, span_row_size; + + u64 block = io_info->ldStartBlock; + u32 count = io_info->numBlocks; + + span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK) + >> RAID_CTX_SPANARM_SPAN_SHIFT); + arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK); + + + fusion = instance->ctrl_context; + drv_map = fusion->ld_drv_map[(instance->map_id & 1)]; + ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map); + raid = MR_LdRaidGet(ld, drv_map); + span_row_size = instance->UnevenSpanSupport ? 
+ SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize; + + arRef = MR_LdSpanArrayGet(ld, span, drv_map); + pd0 = MR_ArPdGet(arRef, arm, drv_map); + pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ? + (arm + 1 - span_row_size) : arm + 1, drv_map); /* get the pending cmds for the data and mirror arms */ - pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]); - pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]); + pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]); + pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]); /* Determine the disk whose head is nearer to the req. block */ - diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]); - diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]); - bestArm = (diff0 <= diff1 ? 0 : 1); + diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]); + diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]); + bestArm = (diff0 <= diff1 ? arm : arm ^ 1); - /*Make balance count from 16 to 4 to keep driver in sync with Firmware*/ - if ((bestArm == arm && pend0 > pend1 + 4) || - (bestArm != arm && pend1 > pend0 + 4)) + if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) || + (bestArm != arm && pend1 > pend0 + lb_pending_cmds)) bestArm ^= 1; /* Update the last accessed block on the correct pd */ - lbInfo->last_accessed_block[bestArm] = block + count - 1; - - return bestArm; + io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1; + lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1; + io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm; +#if SPAN_DEBUG + if (arm != bestArm) + dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance " + "occur - span 0x%x arm 0x%x bestArm 0x%x " + "io_info->span_arm 0x%x\n", + span, arm, bestArm, io_info->span_arm); +#endif + return io_info->pd_after_lb; } -u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo, - struct IO_REQUEST_INFO *io_info) +u16 get_updated_dev_handle(struct megasas_instance *instance, + struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info) { - u8 arm, old_arm; + u8 arm_pd; u16 devHandle; + struct fusion_context *fusion; + struct MR_DRV_RAID_MAP_ALL *drv_map; - old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 
0 : 1; - - /* get best new arm */ - arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock, - io_info->numBlocks); - devHandle = lbInfo->raid1DevHandle[arm]; - atomic_inc(&lbInfo->scsi_pending_cmds[arm]); + fusion = instance->ctrl_context; + drv_map = fusion->ld_drv_map[(instance->map_id & 1)]; + /* get best new arm (PD ID) */ + arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info); + devHandle = MR_PdDevHandleGet(arm_pd, drv_map); + atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]); return devHandle; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index d35ac34ea624..54c720ccb3e6 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -76,8 +76,6 @@ megasas_issue_polled(struct megasas_instance *instance, void megasas_check_and_restore_queue_depth(struct megasas_instance *instance); -u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo, - struct IO_REQUEST_INFO *in_info); int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); void megaraid_sas_kill_hba(struct megasas_instance *instance); @@ -654,6 +652,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) = 1; init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1; + init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb + = 1; /* Convert capability to LE32 */ cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); @@ -1606,10 +1606,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, if ((fusion->load_balance_info[device_id].loadBalanceFlag) && (io_info.isRead)) { io_info.devHandle = - get_updated_dev_handle( + get_updated_dev_handle(instance, &fusion->load_balance_info[device_id], &io_info); scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG; + cmd->pd_r1_lb = io_info.pd_after_lb; } else scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; @@ -1942,7 +1943,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) struct megasas_cmd *cmd_mfi; struct megasas_cmd_fusion *cmd_fusion; u16 smid, num_completed; - u8 reply_descript_type, arm; + u8 reply_descript_type; u32 status, extStatus, device_id; union desc_value d_val; struct LD_LOAD_BALANCE_INFO *lbinfo; @@ -1993,10 +1994,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) lbinfo = &fusion->load_balance_info[device_id]; if (cmd_fusion->scmd->SCp.Status & MEGASAS_LOAD_BALANCE_FLAG) { - arm = lbinfo->raid1DevHandle[0] == - cmd_fusion->io_request->DevHandle ? 
0 : - 1; - atomic_dec(&lbinfo->scsi_pending_cmds[arm]); + atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]); cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 065c27031c9c..75844260fae9 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -630,6 +630,8 @@ struct IO_REQUEST_INFO { u8 start_span; u8 reserved; u64 start_row; + u8 span_arm; /* span[7:5], arm[4:0] */ + u8 pd_after_lb; }; struct MR_LD_TARGET_SYNC { @@ -681,14 +683,14 @@ struct megasas_cmd_fusion { u32 sync_cmd_idx; u32 index; u8 flags; + u8 pd_r1_lb; }; struct LD_LOAD_BALANCE_INFO { u8 loadBalanceFlag; u8 reserved1; - u16 raid1DevHandle[2]; - atomic_t scsi_pending_cmds[2]; - u64 last_accessed_block[2]; + atomic_t scsi_pending_cmds[MAX_PHYSICAL_DEVICES]; + u64 last_accessed_block[MAX_PHYSICAL_DEVICES]; }; /* SPAN_SET is info caclulated from span info from Raid map per LD */ -- cgit From 90dc9d98f01bdfe6d75853311195c6279886f3b8 Mon Sep 17 00:00:00 2001 From: "Sumit.Saxena@avagotech.com" Date: Fri, 12 Sep 2014 18:57:58 +0530 Subject: megaraid_sas : MFI MPT linked list corruption fix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Resending the patch. Addressed the review comments from Tomas Henzl. Added comment for to-do work. Problem statement: MFI link list in megaraid_sas driver is used from mfi-mpt pass-through commands. This list can be corrupted due to many possible race conditions in driver and eventually we may see kernel panic. One example - MFI frame is freed from calling process as driver send command via polling method and interrupt for that command comes after driver free mfi frame (actually even after some other context reuse the mfi frame). When driver receive MPT frame in ISR, driver will be using the index of MFI and access that MFI frame and finally in-used MFI frame’s list will be corrupted. High level description of new solution - Free MFI and MPT command from same context. Free both the command either from process (from where mfi-mpt pass-through was called) or from ISR context. Do not split freeing of MFI and MPT, because it creates the race condition which will do MFI/MPT list corruption. Renamed the cmd_pool_lock which is used in instance as well as fusion with below name. mfi_pool_lock and mpt_pool_lock to add more code readability. 
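The fix described above comes down to a pairing rule: whichever context completes the pass-through, process or ISR, returns both frames together inside one critical section, so no other context can observe and reuse a half-freed pair. A minimal sketch of that pattern with hypothetical demo_* types (the real driver keeps separate MFI and MPT pools, with the MPT pool lock nested inside the MFI pool lock, per the to-do note about later collapsing the two locks):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_mpt_cmd {
	struct list_head list;
};

struct demo_mfi_cmd {
	struct list_head list;
	struct demo_mpt_cmd *attached_mpt;	/* stands in for mpt_pthr_cmd_blocked */
};

struct demo_pools {
	spinlock_t mfi_pool_lock;
	spinlock_t mpt_pool_lock;
	struct list_head mfi_pool;
	struct list_head mpt_pool;
};

/* Return both halves of a pass-through pair from a single context. */
static void demo_return_mfi_mpt_pair(struct demo_pools *p,
				     struct demo_mfi_cmd *mfi)
{
	unsigned long flags;

	spin_lock_irqsave(&p->mfi_pool_lock, flags);
	spin_lock(&p->mpt_pool_lock);
	list_add(&mfi->attached_mpt->list, &p->mpt_pool);
	spin_unlock(&p->mpt_pool_lock);
	mfi->attached_mpt = NULL;
	list_add(&mfi->list, &p->mfi_pool);
	spin_unlock_irqrestore(&p->mfi_pool_lock, flags);
}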
Signed-off-by: Sumit Saxena Signed-off-by: Kashyap Desai Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas.h | 25 +++- drivers/scsi/megaraid/megaraid_sas_base.c | 196 ++++++++++++++++++++-------- drivers/scsi/megaraid/megaraid_sas_fusion.c | 101 ++++++++++---- drivers/scsi/megaraid/megaraid_sas_fusion.h | 2 +- 4 files changed, 241 insertions(+), 83 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 156d4b97d2bd..f99db18c809c 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1016,6 +1016,12 @@ struct megasas_ctrl_info { #define VD_EXT_DEBUG 0 +enum MR_MFI_MPT_PTHR_FLAGS { + MFI_MPT_DETACHED = 0, + MFI_LIST_ADDED = 1, + MFI_MPT_ATTACHED = 2, +}; + /* Frame Type */ #define IO_FRAME 0 #define PTHRU_FRAME 1 @@ -1033,7 +1039,7 @@ struct megasas_ctrl_info { #define MEGASAS_IOCTL_CMD 0 #define MEGASAS_DEFAULT_CMD_TIMEOUT 90 #define MEGASAS_THROTTLE_QUEUE_DEPTH 16 - +#define MEGASAS_BLOCKED_CMD_TIMEOUT 60 /* * FW reports the maximum of number of commands that it can accept (maximum * commands that can be outstanding) at any time. The driver must report a @@ -1652,7 +1658,7 @@ struct megasas_instance { struct megasas_cmd **cmd_list; struct list_head cmd_pool; /* used to sync fire the cmd to fw */ - spinlock_t cmd_pool_lock; + spinlock_t mfi_pool_lock; /* used to sync fire the cmd to fw */ spinlock_t hba_lock; /* used to synch producer, consumer ptrs in dpc */ @@ -1839,6 +1845,11 @@ struct megasas_cmd { struct list_head list; struct scsi_cmnd *scmd; + + void *mpt_pthr_cmd_blocked; + atomic_t mfi_mpt_pthr; + u8 is_wait_event; + struct megasas_instance *instance; union { struct { @@ -1927,4 +1938,14 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance, void megasas_free_host_crash_buffer(struct megasas_instance *instance); void megasas_fusion_crash_dump_wq(struct work_struct *work); +void megasas_return_cmd_fusion(struct megasas_instance *instance, + struct megasas_cmd_fusion *cmd); +int megasas_issue_blocked_cmd(struct megasas_instance *instance, + struct megasas_cmd *cmd, int timeout); +void __megasas_return_cmd(struct megasas_instance *instance, + struct megasas_cmd *cmd); + +void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance, + struct megasas_cmd *cmd_mfi, struct megasas_cmd_fusion *cmd_fusion); + #endif /*LSI_MEGARAID_SAS_H */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 4c4c266e2388..d8cc9227e1ef 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -210,43 +210,66 @@ struct megasas_cmd *megasas_get_cmd(struct megasas_instance unsigned long flags; struct megasas_cmd *cmd = NULL; - spin_lock_irqsave(&instance->cmd_pool_lock, flags); + spin_lock_irqsave(&instance->mfi_pool_lock, flags); if (!list_empty(&instance->cmd_pool)) { cmd = list_entry((&instance->cmd_pool)->next, struct megasas_cmd, list); list_del_init(&cmd->list); + atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_DETACHED); } else { printk(KERN_ERR "megasas: Command pool empty!\n"); } - spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); + spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); return cmd; } /** - * megasas_return_cmd - Return a cmd to free command pool + * __megasas_return_cmd - Return a cmd to free command pool * @instance: Adapter soft state * @cmd: Command packet to be returned to free command pool */ inline void 
-megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) +__megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { - unsigned long flags; - - spin_lock_irqsave(&instance->cmd_pool_lock, flags); + /* + * Don't go ahead and free the MFI frame, if corresponding + * MPT frame is not freed(valid for only fusion adapters). + * In case of MFI adapters, anyways for any allocated MFI + * frame will have cmd->mfi_mpt_mpthr set to MFI_MPT_DETACHED + */ + if (atomic_read(&cmd->mfi_mpt_pthr) != MFI_MPT_DETACHED) + return; cmd->scmd = NULL; cmd->frame_count = 0; + cmd->is_wait_event = 0; + cmd->mpt_pthr_cmd_blocked = NULL; + if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && - (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) && (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) && (reset_devices)) cmd->frame->hdr.cmd = MFI_CMD_INVALID; - list_add_tail(&cmd->list, &instance->cmd_pool); - spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); + atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED); + list_add(&cmd->list, (&instance->cmd_pool)->next); +} + +/** + * megasas_return_cmd - Return a cmd to free command pool + * @instance: Adapter soft state + * @cmd: Command packet to be returned to free command pool + */ +inline void +megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) +{ + unsigned long flags; + + spin_lock_irqsave(&instance->mfi_pool_lock, flags); + __megasas_return_cmd(instance, cmd); + spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); } @@ -925,13 +948,14 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs * Used to issue ioctl commands. 
*/ -static int +int megasas_issue_blocked_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, int timeout) { int ret = 0; cmd->cmd_status = ENODATA; + cmd->is_wait_event = 1; instance->instancet->issue_dcmd(instance, cmd); if (timeout) { ret = wait_event_timeout(instance->int_cmd_wait_q, @@ -1903,7 +1927,12 @@ out: new_affiliation_111, new_affiliation_111_h); } - megasas_return_cmd(instance, cmd); + + if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) + megasas_return_mfi_mpt_pthr(instance, cmd, + cmd->mpt_pthr_cmd_blocked); + else + megasas_return_cmd(instance, cmd); return retval; } @@ -2070,7 +2099,11 @@ out: (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), new_affiliation, new_affiliation_h); - megasas_return_cmd(instance, cmd); + if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) + megasas_return_mfi_mpt_pthr(instance, cmd, + cmd->mpt_pthr_cmd_blocked); + else + megasas_return_cmd(instance, cmd); return retval; } @@ -2530,7 +2563,12 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) cmd->abort_aen = 0; instance->aen_cmd = NULL; - megasas_return_cmd(instance, cmd); + + if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) + megasas_return_mfi_mpt_pthr(instance, cmd, + cmd->mpt_pthr_cmd_blocked); + else + megasas_return_cmd(instance, cmd); if ((instance->unload == 0) && ((instance->issuepend_done == 1))) { @@ -2906,7 +2944,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, "failed, status = 0x%x.\n", cmd->frame->hdr.cmd_status); else { - megasas_return_cmd(instance, cmd); + megasas_return_mfi_mpt_pthr(instance, + cmd, cmd->mpt_pthr_cmd_blocked); spin_unlock_irqrestore( instance->host->host_lock, flags); @@ -2914,7 +2953,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, } } else instance->map_id++; - megasas_return_cmd(instance, cmd); + megasas_return_mfi_mpt_pthr(instance, cmd, + cmd->mpt_pthr_cmd_blocked); /* * Set fast path IO to ZERO. 
@@ -3070,7 +3110,7 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance) unsigned long flags; defer_index = 0; - spin_lock_irqsave(&instance->cmd_pool_lock, flags); + spin_lock_irqsave(&instance->mfi_pool_lock, flags); for (i = 0; i < max_cmd; i++) { cmd = instance->cmd_list[i]; if (cmd->sync_cmd == 1 || cmd->scmd) { @@ -3091,7 +3131,7 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance) &instance->internal_reset_pending_q); } } - spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); + spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); } @@ -3656,7 +3696,9 @@ int megasas_alloc_cmds(struct megasas_instance *instance) int j; u32 max_cmd; struct megasas_cmd *cmd; + struct fusion_context *fusion; + fusion = instance->ctrl_context; max_cmd = instance->max_mfi_cmds; /* @@ -3689,13 +3731,11 @@ int megasas_alloc_cmds(struct megasas_instance *instance) } } - /* - * Add all the commands to command pool (instance->cmd_pool) - */ for (i = 0; i < max_cmd; i++) { cmd = instance->cmd_list[i]; memset(cmd, 0, sizeof(struct megasas_cmd)); cmd->index = i; + atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED); cmd->scmd = NULL; cmd->instance = instance; @@ -3766,11 +3806,11 @@ megasas_get_pd_list(struct megasas_instance *instance) dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); - if (!megasas_issue_polled(instance, cmd)) { - ret = 0; - } else { - ret = -1; - } + if (instance->ctrl_context && !instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, cmd, + MEGASAS_BLOCKED_CMD_TIMEOUT); + else + ret = megasas_issue_polled(instance, cmd); /* * the following function will get the instance PD LIST. @@ -3802,7 +3842,12 @@ megasas_get_pd_list(struct megasas_instance *instance) pci_free_consistent(instance->pdev, MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), ci, ci_h); - megasas_return_cmd(instance, cmd); + + if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) + megasas_return_mfi_mpt_pthr(instance, cmd, + cmd->mpt_pthr_cmd_blocked); + else + megasas_return_cmd(instance, cmd); return ret; } @@ -3861,11 +3906,12 @@ megasas_get_ld_list(struct megasas_instance *instance) dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST)); dcmd->pad_0 = 0; - if (!megasas_issue_polled(instance, cmd)) { - ret = 0; - } else { - ret = -1; - } + if (instance->ctrl_context && !instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, cmd, + MEGASAS_BLOCKED_CMD_TIMEOUT); + else + ret = megasas_issue_polled(instance, cmd); + ld_count = le32_to_cpu(ci->ldCount); @@ -3888,7 +3934,11 @@ megasas_get_ld_list(struct megasas_instance *instance) ci, ci_h); - megasas_return_cmd(instance, cmd); + if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) + megasas_return_mfi_mpt_pthr(instance, cmd, + cmd->mpt_pthr_cmd_blocked); + else + megasas_return_cmd(instance, cmd); return ret; } @@ -3949,12 +3999,11 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); dcmd->pad_0 = 0; - if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) { - ret = 0; - } else { - /* On failure, call older LD list DCMD */ - ret = 1; - } + if (instance->ctrl_context && !instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, cmd, + MEGASAS_BLOCKED_CMD_TIMEOUT); + else + ret = megasas_issue_polled(instance, cmd); tgtid_count = le32_to_cpu(ci->count); @@ -3970,7 +4019,11 @@ 
megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST), ci, ci_h); - megasas_return_cmd(instance, cmd); + if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) + megasas_return_mfi_mpt_pthr(instance, cmd, + cmd->mpt_pthr_cmd_blocked); + else + megasas_return_cmd(instance, cmd); return ret; } @@ -4027,17 +4080,23 @@ megasas_get_ctrl_info(struct megasas_instance *instance, dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info)); dcmd->mbox.b[0] = 1; - if (!megasas_issue_polled(instance, cmd)) { - ret = 0; + if (instance->ctrl_context && !instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, cmd, + MEGASAS_BLOCKED_CMD_TIMEOUT); + else + ret = megasas_issue_polled(instance, cmd); + + if (!ret) memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info)); - } else { - ret = -1; - } pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), ci, ci_h); - megasas_return_cmd(instance, cmd); + if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) + megasas_return_mfi_mpt_pthr(instance, cmd, + cmd->mpt_pthr_cmd_blocked); + else + megasas_return_cmd(instance, cmd); return ret; } @@ -4086,11 +4145,17 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance, dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h); dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE); - if (!megasas_issue_polled(instance, cmd)) - ret = 0; + if (instance->ctrl_context && !instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, cmd, + MEGASAS_BLOCKED_CMD_TIMEOUT); else - ret = -1; - megasas_return_cmd(instance, cmd); + ret = megasas_issue_polled(instance, cmd); + + if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) + megasas_return_mfi_mpt_pthr(instance, cmd, + cmd->mpt_pthr_cmd_blocked); + else + megasas_return_cmd(instance, cmd); return ret; } @@ -4660,7 +4725,11 @@ megasas_get_seq_num(struct megasas_instance *instance, pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), el_info, el_info_h); - megasas_return_cmd(instance, cmd); + if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) + megasas_return_mfi_mpt_pthr(instance, cmd, + cmd->mpt_pthr_cmd_blocked); + else + megasas_return_cmd(instance, cmd); return 0; } @@ -5015,7 +5084,7 @@ static int megasas_probe_one(struct pci_dev *pdev, } fusion = instance->ctrl_context; INIT_LIST_HEAD(&fusion->cmd_pool); - spin_lock_init(&fusion->cmd_pool_lock); + spin_lock_init(&fusion->mpt_pool_lock); memset(fusion->load_balance_info, 0, sizeof(struct LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT); } @@ -5086,7 +5155,7 @@ static int megasas_probe_one(struct pci_dev *pdev, init_waitqueue_head(&instance->int_cmd_wait_q); init_waitqueue_head(&instance->abort_cmd_wait_q); - spin_lock_init(&instance->cmd_pool_lock); + spin_lock_init(&instance->mfi_pool_lock); spin_lock_init(&instance->hba_lock); spin_lock_init(&instance->completion_lock); @@ -5106,7 +5175,7 @@ static int megasas_probe_one(struct pci_dev *pdev, instance->flag_ieee = 1; sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); } else - sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS); + sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5)); megasas_dbg_lvl = 0; instance->flag = 0; @@ -5318,7 +5387,11 @@ static void megasas_flush_cache(struct megasas_instance *instance) dev_err(&instance->pdev->dev, "Command timedout" " from %s\n", __func__); - megasas_return_cmd(instance, cmd); + if (instance->ctrl_context && 
cmd->mpt_pthr_cmd_blocked) + megasas_return_mfi_mpt_pthr(instance, cmd, + cmd->mpt_pthr_cmd_blocked); + else + megasas_return_cmd(instance, cmd); return; } @@ -5365,7 +5438,11 @@ static void megasas_shutdown_controller(struct megasas_instance *instance, dev_err(&instance->pdev->dev, "Command timedout" "from %s\n", __func__); - megasas_return_cmd(instance, cmd); + if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) + megasas_return_mfi_mpt_pthr(instance, cmd, + cmd->mpt_pthr_cmd_blocked); + else + megasas_return_cmd(instance, cmd); return; } @@ -6026,9 +6103,14 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, le32_to_cpu(kern_sge32[i].length), kbuff_arr[i], le32_to_cpu(kern_sge32[i].phys_addr)); + kbuff_arr[i] = NULL; } - megasas_return_cmd(instance, cmd); + if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) + megasas_return_mfi_mpt_pthr(instance, cmd, + cmd->mpt_pthr_cmd_blocked); + else + megasas_return_cmd(instance, cmd); return error; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 54c720ccb3e6..f37eed682c75 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -50,6 +50,7 @@ #include #include #include +#include #include "megaraid_sas_fusion.h" #include "megaraid_sas.h" @@ -163,7 +164,7 @@ struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance (struct fusion_context *)instance->ctrl_context; struct megasas_cmd_fusion *cmd = NULL; - spin_lock_irqsave(&fusion->cmd_pool_lock, flags); + spin_lock_irqsave(&fusion->mpt_pool_lock, flags); if (!list_empty(&fusion->cmd_pool)) { cmd = list_entry((&fusion->cmd_pool)->next, @@ -173,7 +174,7 @@ struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance printk(KERN_ERR "megasas: Command pool (fusion) empty!\n"); } - spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags); + spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags); return cmd; } @@ -182,21 +183,47 @@ struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance * @instance: Adapter soft state * @cmd: Command packet to be returned to free command pool */ -static inline void -megasas_return_cmd_fusion(struct megasas_instance *instance, - struct megasas_cmd_fusion *cmd) +inline void megasas_return_cmd_fusion(struct megasas_instance *instance, + struct megasas_cmd_fusion *cmd) { unsigned long flags; struct fusion_context *fusion = (struct fusion_context *)instance->ctrl_context; - spin_lock_irqsave(&fusion->cmd_pool_lock, flags); + spin_lock_irqsave(&fusion->mpt_pool_lock, flags); cmd->scmd = NULL; cmd->sync_cmd_idx = (u32)ULONG_MAX; - list_add_tail(&cmd->list, &fusion->cmd_pool); + list_add(&cmd->list, (&fusion->cmd_pool)->next); - spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags); + spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags); +} + +/** + * megasas_return_mfi_mpt_pthr - Return a mfi and mpt to free command pool + * @instance: Adapter soft state + * @cmd_mfi: MFI Command packet to be returned to free command pool + * @cmd_mpt: MPT Command packet to be returned to free command pool + */ +inline void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance, + struct megasas_cmd *cmd_mfi, + struct megasas_cmd_fusion *cmd_fusion) +{ + unsigned long flags; + + /* + * TO DO: optimize this code and use only one lock instead of two + * locks being used currently- mpt_pool_lock is acquired + * inside mfi_pool_lock + */ + spin_lock_irqsave(&instance->mfi_pool_lock, flags); + 
megasas_return_cmd_fusion(instance, cmd_fusion); + if (atomic_read(&cmd_mfi->mfi_mpt_pthr) != MFI_MPT_ATTACHED) + dev_err(&instance->pdev->dev, "Possible bug from %s %d\n", + __func__, __LINE__); + atomic_set(&cmd_mfi->mfi_mpt_pthr, MFI_MPT_DETACHED); + __megasas_return_cmd(instance, cmd_mfi); + spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); } /** @@ -562,9 +589,11 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, { int i; struct megasas_header *frame_hdr = &cmd->frame->hdr; + struct fusion_context *fusion; u32 msecs = seconds * 1000; + fusion = instance->ctrl_context; /* * Wait for cmd_status to change */ @@ -573,8 +602,12 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, msleep(20); } - if (frame_hdr->cmd_status == 0xff) + if (frame_hdr->cmd_status == 0xff) { + if (fusion) + megasas_return_mfi_mpt_pthr(instance, cmd, + cmd->mpt_pthr_cmd_blocked); return -ETIME; + } return 0; } @@ -777,14 +810,17 @@ megasas_get_ld_map_info(struct megasas_instance *instance) dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info); - if (!megasas_issue_polled(instance, cmd)) - ret = 0; - else { - printk(KERN_ERR "megasas: Get LD Map Info Failed\n"); - ret = -1; - } + if (instance->ctrl_context && !instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, cmd, + MEGASAS_BLOCKED_CMD_TIMEOUT); + else + ret = megasas_issue_polled(instance, cmd); - megasas_return_cmd(instance, cmd); + if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) + megasas_return_mfi_mpt_pthr(instance, cmd, + cmd->mpt_pthr_cmd_blocked); + else + megasas_return_cmd(instance, cmd); return ret; } @@ -2018,10 +2054,19 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) break; case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */ cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; + + if (!cmd_mfi->mpt_pthr_cmd_blocked) { + if (megasas_dbg_lvl == 5) + dev_info(&instance->pdev->dev, + "freeing mfi/mpt pass-through " + "from %s %d\n", + __func__, __LINE__); + megasas_return_mfi_mpt_pthr(instance, cmd_mfi, + cmd_fusion); + } + megasas_complete_cmd(instance, cmd_mfi, DID_OK); cmd_fusion->flags = 0; - megasas_return_cmd_fusion(instance, cmd_fusion); - break; } @@ -2181,6 +2226,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, struct megasas_cmd_fusion *cmd; struct fusion_context *fusion; struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr; + u32 opcode; cmd = megasas_get_cmd_fusion(instance); if (!cmd) @@ -2188,9 +2234,20 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, /* Save the smid. To be used for returning the cmd */ mfi_cmd->context.smid = cmd->index; - cmd->sync_cmd_idx = mfi_cmd->index; + /* Set this only for Blocked commands */ + opcode = le32_to_cpu(mfi_cmd->frame->dcmd.opcode); + if ((opcode == MR_DCMD_LD_MAP_GET_INFO) + && (mfi_cmd->frame->dcmd.mbox.b[1] == 1)) + mfi_cmd->is_wait_event = 1; + + if (opcode == MR_DCMD_CTRL_EVENT_WAIT) + mfi_cmd->is_wait_event = 1; + + if (mfi_cmd->is_wait_event) + mfi_cmd->mpt_pthr_cmd_blocked = cmd; + /* * For cmds where the flag is set, store the flag and check * on completion. 
For cmds with this flag, don't call @@ -2279,6 +2336,7 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance, printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n"); return; } + atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_ATTACHED); instance->instancet->fire_cmd(instance, req_desc->u.low, req_desc->u.high, instance->reg_set); } @@ -2750,10 +2808,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) cmd_list[cmd_fusion->sync_cmd_idx]; if (cmd_mfi->frame->dcmd.opcode == cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) { - megasas_return_cmd(instance, - cmd_mfi); - megasas_return_cmd_fusion( - instance, cmd_fusion); + megasas_return_mfi_mpt_pthr(instance, cmd_mfi, cmd_fusion); } else { req_desc = megasas_get_request_descriptor( diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 75844260fae9..0d183d521bdd 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -797,7 +797,7 @@ struct fusion_context { struct megasas_cmd_fusion **cmd_list; struct list_head cmd_pool; - spinlock_t cmd_pool_lock; + spinlock_t mpt_pool_lock; dma_addr_t req_frames_desc_phys; u8 *req_frames_desc; -- cgit From 0d9d8b9ff4453e4816b22bf729256feb6b38e0ec Mon Sep 17 00:00:00 2001 From: "Sumit.Saxena@avagotech.com" Date: Fri, 12 Sep 2014 18:58:03 +0530 Subject: megaraid_sas : Driver version update Resending the patch. Addressed the review comments by Tomas Henzl. Driver version upgrade patch. Signed-off-by: Sumit Saxena Signed-off-by: Kashyap Desai Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/megaraid/megaraid_sas.h | 6 +++--- drivers/scsi/megaraid/megaraid_sas_base.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index f99db18c809c..a49914de4b95 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -33,9 +33,9 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "06.803.02.00-rc1" -#define MEGASAS_RELDATE "Jun. 19, 2014" -#define MEGASAS_EXT_VERSION "Thu. Jun. 19 17:00:00 PDT 2014" +#define MEGASAS_VERSION "06.805.06.00-rc1" +#define MEGASAS_RELDATE "Sep. 4, 2014" +#define MEGASAS_EXT_VERSION "Thu. Sep. 4 17:00:00 PDT 2014" /* * Device IDs diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index d8cc9227e1ef..f6a69a3b1b3f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -18,7 +18,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * FILE: megaraid_sas_base.c - * Version : 06.803.02.00-rc1 + * Version : 06.805.06.00-rc1 * * Authors: LSI Corporation * Sreenivas Bagalkote -- cgit From 6b3937227479e50032112faf74bd913f36dba2c6 Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 14:18:24 +0800 Subject: arcmsr: fix command timeout under heavy load This patch rewrites the interrupt service routine relate function to fix a command timeout under heavy controller load. 
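The rework below applies one shape to every adapter type's handler: rather than latching the outbound status once, acknowledging it, and servicing only that snapshot, the handler re-reads the status after each pass and loops while any of its bits remain asserted, so events raised during servicing are not dropped under load; the top-level handlers also switch to returning IRQ_HANDLED/IRQ_NONE. A generic sketch of the pattern (not the arcmsr code itself):

#include <linux/interrupt.h>
#include <linux/io.h>

static irqreturn_t demo_looping_isr(void __iomem *status_reg, u32 handled_mask,
				    void (*service)(u32 bits))
{
	u32 status = readl(status_reg) & handled_mask;

	if (!status)
		return IRQ_NONE;		/* shared line: not our interrupt */
	do {
		writel(status, status_reg);	/* ack the bits about to be handled */
		service(status);
		status = readl(status_reg) & handled_mask;
	} while (status);			/* more events arrived meanwhile? */
	return IRQ_HANDLED;
}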
Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr.h | 2 +- drivers/scsi/arcmsr/arcmsr_hba.c | 198 +++++++++++++++++++-------------------- 2 files changed, 98 insertions(+), 102 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index 77b26f5b9c33..8f357933d8cb 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -51,7 +51,7 @@ struct device_attribute; #else #define ARCMSR_MAX_FREECCB_NUM 320 #endif -#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2010/08/05" +#define ARCMSR_DRIVER_VERSION "v1.30.00.04-20140428" #define ARCMSR_SCSI_INITIATOR_ID 255 #define ARCMSR_MAX_XFER_SECTORS 512 #define ARCMSR_MAX_XFER_SECTORS_B 4096 diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index b13764ca23fd..506fe7b851bc 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -1441,14 +1441,15 @@ static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) uint32_t outbound_doorbell; struct MessageUnit_A __iomem *reg = acb->pmuA; outbound_doorbell = readl(®->outbound_doorbell); - writel(outbound_doorbell, ®->outbound_doorbell); - if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) { - arcmsr_iop2drv_data_wrote_handle(acb); - } - - if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) { - arcmsr_iop2drv_data_read_handle(acb); - } + do { + writel(outbound_doorbell, ®->outbound_doorbell); + if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) + arcmsr_iop2drv_data_wrote_handle(acb); + if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) + arcmsr_iop2drv_data_read_handle(acb); + outbound_doorbell = readl(®->outbound_doorbell); + } while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK + | ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)); } static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *pACB) { @@ -1462,17 +1463,19 @@ static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *pACB) ******************************************************************* */ outbound_doorbell = readl(®->outbound_doorbell); - writel(outbound_doorbell, ®->outbound_doorbell_clear);/*clear interrupt*/ - if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) { - arcmsr_iop2drv_data_wrote_handle(pACB); - } - if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) { - arcmsr_iop2drv_data_read_handle(pACB); - } - if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { - arcmsr_hbc_message_isr(pACB); /* messenger of "driver to iop commands" */ - } - return; + do { + writel(outbound_doorbell, ®->outbound_doorbell_clear); + readl(®->outbound_doorbell_clear); + if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) + arcmsr_iop2drv_data_wrote_handle(pACB); + if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) + arcmsr_iop2drv_data_read_handle(pACB); + if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) + arcmsr_hbc_message_isr(pACB); + outbound_doorbell = readl(®->outbound_doorbell); + } while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK + | ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK + | ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)); } static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) { @@ -1521,21 +1524,23 @@ static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb) /* areca cdb command done */ /* Use correct offset and size for syncing */ - while (readl(&phbcmu->host_int_status) & - ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR){ 
- /* check if command done with no error*/ - flag_ccb = readl(&phbcmu->outbound_queueport_low); - ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);/*frame must be 32 bytes aligned*/ - arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); - ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); - error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; - /* check if command done with no error */ - arcmsr_drain_donequeue(acb, ccb, error); - if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) { - writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING, &phbcmu->inbound_doorbell); - break; - } - throttling++; + while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) != + 0xFFFFFFFF) { + ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); + arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + + ccb_cdb_phy); + ccb = container_of(arcmsr_cdb, struct CommandControlBlock, + arcmsr_cdb); + error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) + ? true : false; + /* check if command done with no error */ + arcmsr_drain_donequeue(acb, ccb, error); + throttling++; + if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) { + writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING, + &phbcmu->inbound_doorbell); + throttling = 0; + } } } /* @@ -1584,21 +1589,22 @@ static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) struct MessageUnit_A __iomem *reg = acb->pmuA; outbound_intstatus = readl(®->outbound_intstatus) & acb->outbound_int_enable; - if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) { - return 1; - } - writel(outbound_intstatus, ®->outbound_intstatus); - if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) { - arcmsr_hba_doorbell_isr(acb); - } - if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { - arcmsr_hba_postqueue_isr(acb); - } - if(outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { - /* messenger of "driver to iop commands" */ - arcmsr_hba_message_isr(acb); - } - return 0; + if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) + return IRQ_NONE; + do { + writel(outbound_intstatus, ®->outbound_intstatus); + if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) + arcmsr_hba_doorbell_isr(acb); + if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) + arcmsr_hba_postqueue_isr(acb); + if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) + arcmsr_hba_message_isr(acb); + outbound_intstatus = readl(®->outbound_intstatus) & + acb->outbound_int_enable; + } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT + | ARCMSR_MU_OUTBOUND_POSTQUEUE_INT + | ARCMSR_MU_OUTBOUND_MESSAGE0_INT)); + return IRQ_HANDLED; } static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) @@ -1608,27 +1614,25 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) outbound_doorbell = readl(reg->iop2drv_doorbell) & acb->outbound_int_enable; if (!outbound_doorbell) - return 1; - - writel(~outbound_doorbell, reg->iop2drv_doorbell); - /*in case the last action of doorbell interrupt clearance is cached, - this action can push HW to write down the clear bit*/ - readl(reg->iop2drv_doorbell); - writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell); - if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { - arcmsr_iop2drv_data_wrote_handle(acb); - } - if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) { - arcmsr_iop2drv_data_read_handle(acb); - } - if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { - arcmsr_hbb_postqueue_isr(acb); - } - if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { - /* messenger of "driver to iop 
commands" */ - arcmsr_hbb_message_isr(acb); - } - return 0; + return IRQ_NONE; + do { + writel(~outbound_doorbell, reg->iop2drv_doorbell); + writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell); + if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) + arcmsr_iop2drv_data_wrote_handle(acb); + if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) + arcmsr_iop2drv_data_read_handle(acb); + if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) + arcmsr_hbb_postqueue_isr(acb); + if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) + arcmsr_hbb_message_isr(acb); + outbound_doorbell = readl(reg->iop2drv_doorbell) & + acb->outbound_int_enable; + } while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK + | ARCMSR_IOP2DRV_DATA_READ_OK + | ARCMSR_IOP2DRV_CDB_DONE + | ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)); + return IRQ_HANDLED; } static int arcmsr_handle_hbc_isr(struct AdapterControlBlock *pACB) @@ -1640,44 +1644,36 @@ static int arcmsr_handle_hbc_isr(struct AdapterControlBlock *pACB) ** check outbound intstatus ********************************************* */ - host_interrupt_status = readl(&phbcmu->host_int_status); - if (!host_interrupt_status) { - /*it must be share irq*/ - return 1; - } - /* MU ioctl transfer doorbell interrupts*/ - if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) { - arcmsr_hbc_doorbell_isr(pACB); /* messenger of "ioctl message read write" */ - } - /* MU post queue interrupts*/ - if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) { - arcmsr_hbc_postqueue_isr(pACB); /* messenger of "scsi commands" */ - } - return 0; + host_interrupt_status = readl(&phbcmu->host_int_status) & + (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | + ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR); + if (!host_interrupt_status) + return IRQ_NONE; + do { + if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) + arcmsr_hbc_doorbell_isr(pACB); + /* MU post queue interrupts*/ + if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) + arcmsr_hbc_postqueue_isr(pACB); + host_interrupt_status = readl(&phbcmu->host_int_status); + } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | + ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)); + return IRQ_HANDLED; } static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { - case ACB_ADAPTER_TYPE_A: { - if (arcmsr_handle_hba_isr(acb)) { - return IRQ_NONE; - } - } + case ACB_ADAPTER_TYPE_A: + return arcmsr_handle_hba_isr(acb); break; - - case ACB_ADAPTER_TYPE_B: { - if (arcmsr_handle_hbb_isr(acb)) { - return IRQ_NONE; - } - } + case ACB_ADAPTER_TYPE_B: + return arcmsr_handle_hbb_isr(acb); break; - case ACB_ADAPTER_TYPE_C: { - if (arcmsr_handle_hbc_isr(acb)) { - return IRQ_NONE; - } - } + case ACB_ADAPTER_TYPE_C: + return arcmsr_handle_hbc_isr(acb); + default: + return IRQ_NONE; } - return IRQ_HANDLED; } static void arcmsr_iop_parking(struct AdapterControlBlock *acb) -- cgit From 1d1166ea16ac7047a1b01f20dcbcc6f7754c3c23 Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 14:23:31 +0800 Subject: arcmsr: add code to support MSI-X and MSI interrupt This patch adds code to support MSI and MSI-X interrupt. 
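The new arcmsr_request_irq() below tries MSI-X first (up to ARCMST_NUM_MSIX_VECTORS vectors), falls back to a single MSI vector, and finally to the legacy shared INTx line. Stripped of the per-vector request_irq() calls and error unwinding, the enable ladder looks roughly like this (sketch with a made-up demo_* name):

#include <linux/pci.h>

static int demo_enable_vectors(struct pci_dev *pdev,
			       struct msix_entry *entries, int nvec)
{
	int i, got;

	for (i = 0; i < nvec; i++)
		entries[i].entry = i;

	/* accept anywhere from 1 to nvec MSI-X vectors */
	got = pci_enable_msix_range(pdev, entries, 1, nvec);
	if (got > 0)
		return got;	/* request_irq() once per entries[i].vector */

	if (!pci_enable_msi_exact(pdev, 1))
		return 1;	/* single MSI vector on pdev->irq */

	return 0;		/* legacy INTx: request_irq() with IRQF_SHARED */
}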
Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr.h | 5 +++ drivers/scsi/arcmsr/arcmsr_hba.c | 83 ++++++++++++++++++++++++++++++++++------ 2 files changed, 76 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index 8f357933d8cb..1c64b60a3dcf 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -64,6 +64,7 @@ struct device_attribute; #define ARCMSR_MAX_HBB_POSTQUEUE 264 #define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */ #define ARCMSR_CDB_SG_PAGE_LENGTH 256 +#define ARCMST_NUM_MSIX_VECTORS 4 #ifndef PCI_DEVICE_ID_ARECA_1880 #define PCI_DEVICE_ID_ARECA_1880 0x1880 #endif @@ -508,6 +509,7 @@ struct AdapterControlBlock struct pci_dev * pdev; struct Scsi_Host * host; unsigned long vir2phy_offset; + struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS]; /* Offset is used in making arc cdb physical to virtual calculations */ uint32_t outbound_int_enable; uint32_t cdb_phyaddr_hi32; @@ -544,6 +546,8 @@ struct AdapterControlBlock /* iop init */ #define ACB_F_ABORT 0x0200 #define ACB_F_FIRMWARE_TRAP 0x0400 + #define ACB_F_MSI_ENABLED 0x1000 + #define ACB_F_MSIX_ENABLED 0x2000 struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM]; /* used for memory free */ struct list_head ccb_free_list; @@ -594,6 +598,7 @@ struct AdapterControlBlock #define FW_DEADLOCK 0x0010 atomic_t rq_map_token; atomic_t ante_token_value; + int msix_vector_count; };/* HW_DEVICE_EXTENSION */ /* ******************************************************************************* diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 506fe7b851bc..60227d56bd39 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -603,6 +603,56 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work) } } +static int +arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb) +{ + int i, j, r; + struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS]; + + for (i = 0; i < ARCMST_NUM_MSIX_VECTORS; i++) + entries[i].entry = i; + r = pci_enable_msix_range(pdev, entries, 1, ARCMST_NUM_MSIX_VECTORS); + if (r < 0) + goto msi_int; + acb->msix_vector_count = r; + for (i = 0; i < r; i++) { + if (request_irq(entries[i].vector, + arcmsr_do_interrupt, 0, "arcmsr", acb)) { + pr_warn("arcmsr%d: request_irq =%d failed!\n", + acb->host->host_no, entries[i].vector); + for (j = 0 ; j < i ; j++) + free_irq(entries[j].vector, acb); + pci_disable_msix(pdev); + goto msi_int; + } + acb->entries[i] = entries[i]; + } + acb->acb_flags |= ACB_F_MSIX_ENABLED; + pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no); + return SUCCESS; +msi_int: + if (pci_enable_msi_exact(pdev, 1) < 0) + goto legacy_int; + if (request_irq(pdev->irq, arcmsr_do_interrupt, + IRQF_SHARED, "arcmsr", acb)) { + pr_warn("arcmsr%d: request_irq =%d failed!\n", + acb->host->host_no, pdev->irq); + pci_disable_msi(pdev); + goto legacy_int; + } + acb->acb_flags |= ACB_F_MSI_ENABLED; + pr_info("arcmsr%d: msi enabled\n", acb->host->host_no); + return SUCCESS; +legacy_int: + if (request_irq(pdev->irq, arcmsr_do_interrupt, + IRQF_SHARED, "arcmsr", acb)) { + pr_warn("arcmsr%d: request_irq = %d failed!\n", + acb->host->host_no, pdev->irq); + return FAILED; + } + return SUCCESS; +} + static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct Scsi_Host *host; @@ -667,16 +717,13 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) 
if(error){ goto free_hbb_mu; } - arcmsr_iop_init(acb); error = scsi_add_host(host, &pdev->dev); if(error){ goto RAID_controller_stop; } - error = request_irq(pdev->irq, arcmsr_do_interrupt, IRQF_SHARED, "arcmsr", acb); - if(error){ + if (arcmsr_request_irq(pdev, acb) == FAILED) goto scsi_host_remove; - } - host->irq = pdev->irq; + arcmsr_iop_init(acb); scsi_scan_host(host); INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn); atomic_set(&acb->rq_map_token, 16); @@ -710,6 +757,22 @@ pci_disable_dev: return -ENODEV; } +static void arcmsr_free_irq(struct pci_dev *pdev, + struct AdapterControlBlock *acb) +{ + int i; + + if (acb->acb_flags & ACB_F_MSI_ENABLED) { + free_irq(pdev->irq, acb); + pci_disable_msi(pdev); + } else if (acb->acb_flags & ACB_F_MSIX_ENABLED) { + for (i = 0; i < acb->msix_vector_count; i++) + free_irq(acb->entries[i].vector, acb); + pci_disable_msix(pdev); + } else + free_irq(pdev->irq, acb); +} + static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; @@ -992,6 +1055,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) } } } + static void arcmsr_remove(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); @@ -1029,7 +1093,7 @@ static void arcmsr_remove(struct pci_dev *pdev) } } } - free_irq(pdev->irq, acb); + arcmsr_free_irq(pdev, acb); arcmsr_free_ccb_pool(acb); arcmsr_free_hbb_mu(acb); arcmsr_unmap_pciregion(acb); @@ -1045,6 +1109,7 @@ static void arcmsr_shutdown(struct pci_dev *pdev) (struct AdapterControlBlock *)host->hostdata; del_timer_sync(&acb->eternal_timer); arcmsr_disable_outbound_ints(acb); + arcmsr_free_irq(pdev, acb); flush_work(&acb->arcmsr_do_message_isr_bh); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); @@ -2516,8 +2581,6 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_A: { if (cdb_phyaddr_hi32 != 0) { struct MessageUnit_A __iomem *reg = acb->pmuA; - uint32_t intmask_org; - intmask_org = arcmsr_disable_outbound_ints(acb); writel(ARCMSR_SIGNATURE_SET_CONFIG, \ ®->message_rwbuffer[0]); writel(cdb_phyaddr_hi32, ®->message_rwbuffer[1]); @@ -2529,7 +2592,6 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) acb->host->host_no); return 1; } - arcmsr_enable_outbound_ints(acb, intmask_org); } } break; @@ -2539,8 +2601,6 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) uint32_t __iomem *rwbuffer; struct MessageUnit_B *reg = acb->pmuB; - uint32_t intmask_org; - intmask_org = arcmsr_disable_outbound_ints(acb); reg->postq_index = 0; reg->doneq_index = 0; writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell); @@ -2569,7 +2629,6 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) return 1; } arcmsr_hbb_enable_driver_mode(acb); - arcmsr_enable_outbound_ints(acb, intmask_org); } break; case ACB_ADAPTER_TYPE_C: { -- cgit From 61cda87f33be22828c3b52863da1c456ba108d37 Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 14:26:09 +0800 Subject: arcmsr: add code to support hibernation This patch adds code to support system hibernation. 
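Most of the work here is ordering: the adapter has to be quiesced before the PCI function is powered down, and resume has to redo the probe-time setup (DMA mask, IRQ, IOP init, device-map timer). A minimal sketch of the suspend side (hypothetical name, using the helpers visible in the diff below and skipping return-value handling):

static int suspend_sketch(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	arcmsr_disable_outbound_ints(acb);		/* mask controller interrupts */
	arcmsr_free_irq(pdev, acb);			/* release MSI-X/MSI/INTx vectors */
	del_timer_sync(&acb->eternal_timer);		/* stop the device-map poll timer */
	flush_work(&acb->arcmsr_do_message_isr_bh);	/* finish any pending rescan work */
	arcmsr_stop_adapter_bgrb(acb);			/* stop background rebuild */
	arcmsr_flush_adapter_cache(acb);		/* commit cached writes */

	pci_save_state(pdev);				/* only now touch PCI power state */
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

arcmsr_resume in the diff reverses this, and falls back to a full teardown (controller_unregister) if the DMA mask or IRQ setup fails.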
Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr_hba.c | 76 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 60227d56bd39..b338a3b05549 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -89,11 +89,15 @@ static int arcmsr_bios_param(struct scsi_device *sdev, static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id); +static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state); +static int arcmsr_resume(struct pci_dev *pdev); static void arcmsr_remove(struct pci_dev *pdev); static void arcmsr_shutdown(struct pci_dev *pdev); static void arcmsr_iop_init(struct AdapterControlBlock *acb); static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb); static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb); +static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, + u32 intmask_org); static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb); static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb); @@ -167,6 +171,8 @@ static struct pci_driver arcmsr_pci_driver = { .id_table = arcmsr_device_id_table, .probe = arcmsr_probe, .remove = arcmsr_remove, + .suspend = arcmsr_suspend, + .resume = arcmsr_resume, .shutdown = arcmsr_shutdown, }; /* @@ -773,6 +779,76 @@ static void arcmsr_free_irq(struct pci_dev *pdev, free_irq(pdev->irq, acb); } +static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state) +{ + uint32_t intmask_org; + struct Scsi_Host *host = pci_get_drvdata(pdev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *)host->hostdata; + + intmask_org = arcmsr_disable_outbound_ints(acb); + arcmsr_free_irq(pdev, acb); + del_timer_sync(&acb->eternal_timer); + flush_work(&acb->arcmsr_do_message_isr_bh); + arcmsr_stop_adapter_bgrb(acb); + arcmsr_flush_adapter_cache(acb); + pci_set_drvdata(pdev, host); + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int arcmsr_resume(struct pci_dev *pdev) +{ + int error; + struct Scsi_Host *host = pci_get_drvdata(pdev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *)host->hostdata; + + pci_set_power_state(pdev, PCI_D0); + pci_enable_wake(pdev, PCI_D0, 0); + pci_restore_state(pdev); + if (pci_enable_device(pdev)) { + pr_warn("%s: pci_enable_device error\n", __func__); + return -ENODEV; + } + error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (error) { + error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (error) { + pr_warn("scsi%d: No suitable DMA mask available\n", + host->host_no); + goto controller_unregister; + } + } + pci_set_master(pdev); + if (arcmsr_request_irq(pdev, acb) == FAILED) + goto controller_stop; + arcmsr_iop_init(acb); + INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn); + atomic_set(&acb->rq_map_token, 16); + atomic_set(&acb->ante_token_value, 16); + acb->fw_flag = FW_NORMAL; + init_timer(&acb->eternal_timer); + acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ); + acb->eternal_timer.data = (unsigned long) acb; + acb->eternal_timer.function = &arcmsr_request_device_map; + add_timer(&acb->eternal_timer); + return 0; +controller_stop: + 
arcmsr_stop_adapter_bgrb(acb); + arcmsr_flush_adapter_cache(acb); +controller_unregister: + scsi_remove_host(host); + arcmsr_free_ccb_pool(acb); + arcmsr_unmap_pciregion(acb); + pci_release_regions(pdev); + scsi_host_put(host); + pci_disable_device(pdev); + return -ENODEV; +} + static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; -- cgit From 3df824aff935444601101cc329ebe3f52e126a4e Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 14:29:41 +0800 Subject: arcmsr: limit max. number of SCSI command request This patch limits the max. number of SCSI commmand request to avoid command overflow. Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr.h | 4 +++- drivers/scsi/arcmsr/arcmsr_hba.c | 32 ++++++++++++++++++++++---------- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index 1c64b60a3dcf..0ae0ce31bb46 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -45,11 +45,12 @@ #include struct device_attribute; /*The limit of outstanding scsi command that firmware can handle*/ -#define ARCMSR_MAX_OUTSTANDING_CMD 256 #ifdef CONFIG_XEN #define ARCMSR_MAX_FREECCB_NUM 160 +#define ARCMSR_MAX_OUTSTANDING_CMD 155 #else #define ARCMSR_MAX_FREECCB_NUM 320 +#define ARCMSR_MAX_OUTSTANDING_CMD 255 #endif #define ARCMSR_DRIVER_VERSION "v1.30.00.04-20140428" #define ARCMSR_SCSI_INITIATOR_ID 255 @@ -598,6 +599,7 @@ struct AdapterControlBlock #define FW_DEADLOCK 0x0010 atomic_t rq_map_token; atomic_t ante_token_value; + uint32_t maxOutstanding; int msix_vector_count; };/* HW_DEVICE_EXTENSION */ /* diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index b338a3b05549..ed61ee283a61 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -134,7 +134,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = { .eh_bus_reset_handler = arcmsr_bus_reset, .bios_param = arcmsr_bios_param, .change_queue_depth = arcmsr_adjust_disk_queue_depth, - .can_queue = ARCMSR_MAX_FREECCB_NUM, + .can_queue = ARCMSR_MAX_OUTSTANDING_CMD, .this_id = ARCMSR_SCSI_INITIATOR_ID, .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES, .max_sectors = ARCMSR_MAX_XFER_SECTORS_C, @@ -693,7 +693,7 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) host->max_lun = ARCMSR_MAX_TARGETLUN; host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/ host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/ - host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */ + host->can_queue = ARCMSR_MAX_OUTSTANDING_CMD; host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN; host->this_id = ARCMSR_SCSI_INITIATOR_ID; host->unique_id = (bus << 8) | dev_fun; @@ -2216,9 +2216,6 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd, arcmsr_handle_virtual_command(acb, cmd); return 0; } - if (atomic_read(&acb->ccboutstandingcount) >= - ARCMSR_MAX_OUTSTANDING_CMD) - return SCSI_MLQUEUE_HOST_BUSY; ccb = arcmsr_get_freeccb(acb); if (!ccb) return SCSI_MLQUEUE_HOST_BUSY; @@ -2428,12 +2425,27 @@ static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB) } static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) { - if (acb->adapter_type == ACB_ADAPTER_TYPE_A) - return arcmsr_get_hba_config(acb); - else if (acb->adapter_type == ACB_ADAPTER_TYPE_B) - return arcmsr_get_hbb_config(acb); + bool rtn = false; + + switch 
(acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: + rtn = arcmsr_get_hba_config(acb); + break; + case ACB_ADAPTER_TYPE_B: + rtn = arcmsr_get_hbb_config(acb); + break; + case ACB_ADAPTER_TYPE_C: + rtn = arcmsr_get_hbc_config(acb); + break; + default: + break; + } + if (acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD) + acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD; else - return arcmsr_get_hbc_config(acb); + acb->maxOutstanding = acb->firm_numbers_queue - 1; + acb->host->can_queue = acb->maxOutstanding; + return rtn; } static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, -- cgit From cab5aecee60a7930ca208ee723c18be7b400cfaf Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 14:47:16 +0800 Subject: arcmsr: return status of abort command This patch fixes the wrong return status of abort command. Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr_hba.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index ed61ee283a61..87f388226e83 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -2477,7 +2477,7 @@ static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, } arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5)); ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); - poll_ccb_done = (ccb == poll_ccb) ? 1:0; + poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0; if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) { printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'" @@ -2541,7 +2541,7 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, /* check if command done with no error*/ arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5)); ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); - poll_ccb_done = (ccb == poll_ccb) ? 1:0; + poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0; if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) { printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'" @@ -2597,7 +2597,7 @@ polling_hbc_ccb_retry: ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/ pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); - poll_ccb_done = (pCCB == poll_ccb) ? 1 : 0; + poll_ccb_done |= (pCCB == poll_ccb) ? 
1 : 0; /* check ifcommand done with no error*/ if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) { if (pCCB->startdone == ARCMSR_CCB_ABORTED) { @@ -3199,8 +3199,10 @@ static int arcmsr_abort(struct scsi_cmnd *cmd) (struct AdapterControlBlock *)cmd->device->host->hostdata; int i = 0; int rtn = FAILED; + uint32_t intmask_org; + printk(KERN_NOTICE - "arcmsr%d: abort device command of scsi id = %d lun = %d \n", + "arcmsr%d: abort device command of scsi id = %d lun = %d\n", acb->host->host_no, cmd->device->id, (u32)cmd->device->lun); acb->acb_flags |= ACB_F_ABORT; acb->num_aborts++; @@ -3210,9 +3212,12 @@ static int arcmsr_abort(struct scsi_cmnd *cmd) ** we need to handle it as soon as possible and exit ************************************************ */ - if (!atomic_read(&acb->ccboutstandingcount)) + if (!atomic_read(&acb->ccboutstandingcount)) { + acb->acb_flags &= ~ACB_F_ABORT; return rtn; + } + intmask_org = arcmsr_disable_outbound_ints(acb); for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { struct CommandControlBlock *ccb = acb->pccb_pool[i]; if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) { @@ -3222,6 +3227,7 @@ static int arcmsr_abort(struct scsi_cmnd *cmd) } } acb->acb_flags &= ~ACB_F_ABORT; + arcmsr_enable_outbound_ints(acb, intmask_org); return rtn; } -- cgit From 8b7c994298d7a18f45a9cec2e9b496d9784b1135 Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 14:55:57 +0800 Subject: arcmsr: store adapter type in PCI id table Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr_hba.c | 81 ++++++++++++++++++++-------------------- 1 file changed, 41 insertions(+), 40 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 87f388226e83..30b378cd80a8 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -143,29 +143,50 @@ static struct scsi_host_template arcmsr_scsi_host_template = { .shost_attrs = arcmsr_host_attrs, .no_write_same = 1, }; + static struct pci_device_id arcmsr_device_id_table[] = { - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)}, - {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880)}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, 
PCI_DEVICE_ID_ARECA_1130), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200), + .driver_data = ACB_ADAPTER_TYPE_B}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201), + .driver_data = ACB_ADAPTER_TYPE_B}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202), + .driver_data = ACB_ADAPTER_TYPE_B}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880), + .driver_data = ACB_ADAPTER_TYPE_C}, {0, 0}, /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table); + static struct pci_driver arcmsr_pci_driver = { .name = "arcmsr", .id_table = arcmsr_device_id_table, @@ -295,26 +316,6 @@ static int arcmsr_bios_param(struct scsi_device *sdev, return 0; } -static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb) -{ - struct pci_dev *pdev = acb->pdev; - u16 dev_id; - pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id); - acb->dev_id = dev_id; - switch (dev_id) { - case 0x1880: { - acb->adapter_type = ACB_ADAPTER_TYPE_C; - } - break; - case 0x1201: { - acb->adapter_type = ACB_ADAPTER_TYPE_B; - } - break; - - default: acb->adapter_type = ACB_ADAPTER_TYPE_A; - } -} - static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; @@ -710,7 +711,7 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) ACB_F_MESSAGE_WQBUFFER_READED); acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; INIT_LIST_HEAD(&acb->ccb_free_list); - arcmsr_define_adapter_type(acb); + acb->adapter_type = id->driver_data; error = arcmsr_remap_pciregion(acb); if(!error){ goto pci_release_regs; -- cgit From 12aad94792718c6a79bbd395a650caff85f76ae4 Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 14:59:00 +0800 Subject: arcmsr: revise message_isr_bh_fn to remove duplicate code Revise message_isr_bh_fn to remove the duplicate code for each adapter type. 
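All three adapter types report hot-added and removed LUNs the same way, through a per-target bitmap in the message buffer; only where that buffer lives differs (message_rwbuffer for types A/B, msgcode_rwbuffer for type C). The rewrite below keeps a single copy of the compare-and-rescan walk. Illustrated as a stand-alone helper (hypothetical name, same SCSI calls as the patch):

static void sync_one_target(struct AdapterControlBlock *acb, int target,
			    char *cached, char fwmap)
{
	char diff = *cached ^ fwmap;	/* LUN bits that changed since the last scan */
	int lun;

	if (!diff)
		return;
	*cached = fwmap;
	for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++, diff >>= 1, fwmap >>= 1) {
		if (!(diff & 0x01))
			continue;
		if (fwmap & 0x01) {		/* LUN appeared */
			scsi_add_device(acb->host, 0, target, lun);
		} else {			/* LUN went away */
			struct scsi_device *sdev =
				scsi_device_lookup(acb->host, 0, target, lun);
			if (sdev) {
				scsi_remove_device(sdev);
				scsi_device_put(sdev);
			}
		}
	}
}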
Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr_hba.c | 160 +++++++++++++-------------------------- 1 file changed, 54 insertions(+), 106 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 30b378cd80a8..bfe2ac460e01 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -493,120 +493,68 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) static void arcmsr_message_isr_bh_fn(struct work_struct *work) { - struct AdapterControlBlock *acb = container_of(work,struct AdapterControlBlock, arcmsr_do_message_isr_bh); + struct AdapterControlBlock *acb = container_of(work, + struct AdapterControlBlock, arcmsr_do_message_isr_bh); + char *acb_dev_map = (char *)acb->device_map; + uint32_t __iomem *signature = NULL; + char __iomem *devicemap = NULL; + int target, lun; + struct scsi_device *psdev; + char diff, temp; + switch (acb->adapter_type) { - case ACB_ADAPTER_TYPE_A: { + case ACB_ADAPTER_TYPE_A: { + struct MessageUnit_A __iomem *reg = acb->pmuA; - struct MessageUnit_A __iomem *reg = acb->pmuA; - char *acb_dev_map = (char *)acb->device_map; - uint32_t __iomem *signature = (uint32_t __iomem*) (®->message_rwbuffer[0]); - char __iomem *devicemap = (char __iomem*) (®->message_rwbuffer[21]); - int target, lun; - struct scsi_device *psdev; - char diff; - - atomic_inc(&acb->rq_map_token); - if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) { - for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) { - diff = (*acb_dev_map)^readb(devicemap); - if (diff != 0) { - char temp; - *acb_dev_map = readb(devicemap); - temp =*acb_dev_map; - for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { - if((temp & 0x01)==1 && (diff & 0x01) == 1) { - scsi_add_device(acb->host, 0, target, lun); - }else if((temp & 0x01) == 0 && (diff & 0x01) == 1) { - psdev = scsi_device_lookup(acb->host, 0, target, lun); - if (psdev != NULL ) { - scsi_remove_device(psdev); - scsi_device_put(psdev); - } - } - temp >>= 1; - diff >>= 1; - } - } - devicemap++; - acb_dev_map++; - } - } - break; - } + signature = (uint32_t __iomem *)(®->message_rwbuffer[0]); + devicemap = (char __iomem *)(®->message_rwbuffer[21]); + break; + } + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg = acb->pmuB; - case ACB_ADAPTER_TYPE_B: { - struct MessageUnit_B *reg = acb->pmuB; - char *acb_dev_map = (char *)acb->device_map; - uint32_t __iomem *signature = (uint32_t __iomem*)(®->message_rwbuffer[0]); - char __iomem *devicemap = (char __iomem*)(®->message_rwbuffer[21]); - int target, lun; - struct scsi_device *psdev; - char diff; - - atomic_inc(&acb->rq_map_token); - if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) { - for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) { - diff = (*acb_dev_map)^readb(devicemap); - if (diff != 0) { - char temp; - *acb_dev_map = readb(devicemap); - temp =*acb_dev_map; - for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { - if((temp & 0x01)==1 && (diff & 0x01) == 1) { - scsi_add_device(acb->host, 0, target, lun); - }else if((temp & 0x01) == 0 && (diff & 0x01) == 1) { - psdev = scsi_device_lookup(acb->host, 0, target, lun); - if (psdev != NULL ) { - scsi_remove_device(psdev); - scsi_device_put(psdev); - } - } - temp >>= 1; - diff >>= 1; - } - } - devicemap++; - acb_dev_map++; - } - } - } + signature = (uint32_t __iomem *)(®->message_rwbuffer[0]); + devicemap = (char __iomem *)(®->message_rwbuffer[21]); break; - case ACB_ADAPTER_TYPE_C: { - struct MessageUnit_C 
*reg = acb->pmuC; - char *acb_dev_map = (char *)acb->device_map; - uint32_t __iomem *signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]); - char __iomem *devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]); - int target, lun; - struct scsi_device *psdev; - char diff; - - atomic_inc(&acb->rq_map_token); - if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) { - for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) { - diff = (*acb_dev_map)^readb(devicemap); - if (diff != 0) { - char temp; - *acb_dev_map = readb(devicemap); - temp = *acb_dev_map; - for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { - if ((temp & 0x01) == 1 && (diff & 0x01) == 1) { - scsi_add_device(acb->host, 0, target, lun); - } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) { - psdev = scsi_device_lookup(acb->host, 0, target, lun); - if (psdev != NULL) { - scsi_remove_device(psdev); - scsi_device_put(psdev); - } - } - temp >>= 1; - diff >>= 1; - } + } + case ACB_ADAPTER_TYPE_C: { + struct MessageUnit_C __iomem *reg = acb->pmuC; + + signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]); + devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]); + break; + } + } + atomic_inc(&acb->rq_map_token); + if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG) + return; + for (target = 0; target < ARCMSR_MAX_TARGETID - 1; + target++) { + temp = readb(devicemap); + diff = (*acb_dev_map) ^ temp; + if (diff != 0) { + *acb_dev_map = temp; + for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; + lun++) { + if ((diff & 0x01) == 1 && + (temp & 0x01) == 1) { + scsi_add_device(acb->host, + 0, target, lun); + } else if ((diff & 0x01) == 1 + && (temp & 0x01) == 0) { + psdev = scsi_device_lookup(acb->host, + 0, target, lun); + if (psdev != NULL) { + scsi_remove_device(psdev); + scsi_device_put(psdev); } - devicemap++; - acb_dev_map++; } + temp >>= 1; + diff >>= 1; } } + devicemap++; + acb_dev_map++; } } -- cgit From a5849726bc88506e84e5754d6bfde81ba091f24d Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 15:01:28 +0800 Subject: arcmsr: remove calling arcmsr_hbb_enable_driver_mode Remove calling arcmsr_hbb_enable_driver_mode by in-lining the code into the caller. Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr_hba.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index bfe2ac460e01..5dd520b3b6e2 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -858,17 +858,6 @@ static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb) return rtnval; } -static bool arcmsr_hbb_enable_driver_mode(struct AdapterControlBlock *pacb) -{ - struct MessageUnit_B *reg = pacb->pmuB; - writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell); - if (!arcmsr_hbb_wait_msgint_ready(pacb)) { - printk(KERN_ERR "arcmsr%d: can't set driver mode. 
\n", pacb->host->host_no); - return false; - } - return true; -} - static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) { struct scsi_cmnd *pcmd = ccb->pcmd; @@ -2665,7 +2654,12 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) timeout \n",acb->host->host_no); return 1; } - arcmsr_hbb_enable_driver_mode(acb); + writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell); + if (!arcmsr_hbb_wait_msgint_ready(acb)) { + pr_err("arcmsr%d: can't set driver mode.\n", + acb->host->host_no); + return 1; + } } break; case ACB_ADAPTER_TYPE_C: { -- cgit From a2c89bbccac476d42a8526c0c59d081d9e56d0a8 Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 15:03:55 +0800 Subject: arcmsr: modify printing adapter model number and F/W messages Adjust printing order of adapter model name and firmware version. Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr_hba.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 5dd520b3b6e2..0707677c11a1 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -2207,10 +2207,10 @@ static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb) iop_device_map++; count--; } - printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n", + pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n", acb->host->host_no, - acb->firm_version, - acb->firm_model); + acb->firm_model, + acb->firm_version); acb->signature = readl(®->message_rwbuffer[0]); acb->firm_request_len = readl(®->message_rwbuffer[1]); acb->firm_numbers_queue = readl(®->message_rwbuffer[2]); @@ -2283,10 +2283,10 @@ static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb) count--; } - printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n", + pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n", acb->host->host_no, - acb->firm_version, - acb->firm_model); + acb->firm_model, + acb->firm_version); acb->signature = readl(®->message_rwbuffer[1]); /*firm_signature,1,00-03*/ @@ -2349,10 +2349,10 @@ static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB) iop_firm_version++; count--; } - printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n", + pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n", pACB->host->host_no, - pACB->firm_version, - pACB->firm_model); + pACB->firm_model, + pACB->firm_version); pACB->firm_request_len = readl(®->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/ pACB->firm_numbers_queue = readl(®->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/ pACB->firm_sdram_size = readl(®->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/ -- cgit From 5eb6bfa02a9dfecbb1f644a0b13b16cd3d23770b Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 15:07:35 +0800 Subject: arcmsr: clear outbound doorbell buffer completely Clear outbound doorbell buffer completely for adapter type C. This is to prevent getting bad data input from IOP before ioctl command processing starts. 
Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr_hba.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 0707677c11a1..3363c31bb789 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -2870,11 +2870,23 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb) break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; - uint32_t outbound_doorbell; + uint32_t outbound_doorbell, i; /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = readl(®->outbound_doorbell); writel(outbound_doorbell, ®->outbound_doorbell_clear); writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell); + for (i = 0; i < 200; i++) { + msleep(20); + outbound_doorbell = readl(®->outbound_doorbell); + if (outbound_doorbell & + ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) { + writel(outbound_doorbell, + ®->outbound_doorbell_clear); + writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, + ®->inbound_doorbell); + } else + break; + } } } } @@ -3102,9 +3114,7 @@ sleep: arcmsr_get_firmware_spec(acb); arcmsr_start_adapter_bgrb(acb); /* clear Qbuffer if door bell ringed */ - outbound_doorbell = readl(®->outbound_doorbell); - writel(outbound_doorbell, ®->outbound_doorbell_clear); /*clear interrupt */ - writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell); + arcmsr_clear_doorbell_queue_buffer(acb); /* enable outbound Post Queue,outbound doorbell Interrupt */ arcmsr_enable_outbound_ints(acb, intmask_org); atomic_set(&acb->rq_map_token, 16); -- cgit From 626fa32c801ed583594831051ff9fd56f2e6d261 Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 15:10:12 +0800 Subject: arcmsr: rename functions and variables Rename some variable and function names for readability and consistency. 
Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr.h | 6 +- drivers/scsi/arcmsr/arcmsr_hba.c | 234 ++++++++++++++++++++------------------- 2 files changed, 121 insertions(+), 119 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index 0ae0ce31bb46..6d616beca439 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -359,7 +359,7 @@ struct ARCMSR_CDB #define ARCMSR_CDB_FLAG_ORDEREDQ 0x10 uint8_t msgPages; - uint32_t Context; + uint32_t msgContext; uint32_t DataLength; uint8_t Cdb[16]; uint8_t DeviceStatus; @@ -562,7 +562,7 @@ struct AdapterControlBlock /* dma_coherent used for memory free */ dma_addr_t dma_coherent_handle; /* dma_coherent_handle used for memory free */ - dma_addr_t dma_coherent_handle_hbb_mu; + dma_addr_t dma_coherent_handle2; unsigned int uncache_size; uint8_t rqbuffer[ARCMSR_MAX_QBUFFER]; /* data collection buffer for read from 80331 */ @@ -613,7 +613,7 @@ struct CommandControlBlock{ struct list_head list; /*x32: 8byte, x64: 16byte*/ struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */ struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/ - uint32_t cdb_phyaddr_pattern; /*x32: 4byte, x64: 4byte*/ + uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/ uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/ uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/ #define CCB_FLAG_READ 0x0000 diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 3363c31bb789..725332364092 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -99,16 +99,16 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb); static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, u32 intmask_org); static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); -static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb); -static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb); +static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb); +static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb); static void arcmsr_request_device_map(unsigned long pacb); -static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb); -static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb); -static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb); +static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb); +static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb); +static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb); static void arcmsr_message_isr_bh_fn(struct work_struct *work); static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb); static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); -static void arcmsr_hbc_message_isr(struct AdapterControlBlock *pACB); +static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB); static void arcmsr_hardware_reset(struct AdapterControlBlock *acb); static const char *arcmsr_info(struct Scsi_Host *); static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); @@ -201,7 +201,7 @@ static struct pci_driver arcmsr_pci_driver = { **************************************************************************** */ -static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb) +static void arcmsr_free_mu(struct AdapterControlBlock *acb) { switch (acb->adapter_type) 
{ case ACB_ADAPTER_TYPE_A: @@ -210,7 +210,7 @@ static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_B:{ dma_free_coherent(&acb->pdev->dev, sizeof(struct MessageUnit_B), - acb->pmuB, acb->dma_coherent_handle_hbb_mu); + acb->pmuB, acb->dma_coherent_handle2); } } } @@ -316,7 +316,7 @@ static int arcmsr_bios_param(struct scsi_device *sdev, return 0; } -static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) +static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; int i; @@ -334,7 +334,7 @@ static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) return false; } -static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) +static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; int i; @@ -354,7 +354,7 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) return false; } -static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB) +static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB) { struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC; int i; @@ -372,13 +372,13 @@ static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB) return false; } -static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) +static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; int retry_count = 30; writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0); do { - if (arcmsr_hba_wait_msgint_ready(acb)) + if (arcmsr_hbaA_wait_msgint_ready(acb)) break; else { retry_count--; @@ -388,13 +388,13 @@ static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) } while (retry_count != 0); } -static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb) +static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; int retry_count = 30; writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell); do { - if (arcmsr_hbb_wait_msgint_ready(acb)) + if (arcmsr_hbaB_wait_msgint_ready(acb)) break; else { retry_count--; @@ -404,14 +404,14 @@ static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb) } while (retry_count != 0); } -static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *pACB) +static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB) { struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC; int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */ writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); do { - if (arcmsr_hbc_wait_msgint_ready(pACB)) { + if (arcmsr_hbaC_wait_msgint_ready(pACB)) { break; } else { retry_count--; @@ -426,16 +426,16 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { - arcmsr_flush_hba_cache(acb); + arcmsr_hbaA_flush_cache(acb); } break; case ACB_ADAPTER_TYPE_B: { - arcmsr_flush_hbb_cache(acb); + arcmsr_hbaB_flush_cache(acb); } break; case ACB_ADAPTER_TYPE_C: { - arcmsr_flush_hbc_cache(acb); + arcmsr_hbaC_flush_cache(acb); } } } @@ -480,7 +480,9 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle; for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; 
i++){ cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb); - ccb_tmp->cdb_phyaddr_pattern = ((acb->adapter_type == ACB_ADAPTER_TYPE_C) ? cdb_phyaddr : (cdb_phyaddr >> 5)); + ccb_tmp->cdb_phyaddr = + ((acb->adapter_type == ACB_ADAPTER_TYPE_C) ? + cdb_phyaddr : (cdb_phyaddr >> 5)); acb->pccb_pool[i] = ccb_tmp; ccb_tmp->acb = acb; INIT_LIST_HEAD(&ccb_tmp->list); @@ -700,7 +702,7 @@ RAID_controller_stop: arcmsr_flush_adapter_cache(acb); arcmsr_free_ccb_pool(acb); free_hbb_mu: - arcmsr_free_hbb_mu(acb); + arcmsr_free_mu(acb); unmap_pci_region: arcmsr_unmap_pciregion(acb); pci_release_regs: @@ -798,40 +800,40 @@ controller_unregister: return -ENODEV; } -static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) +static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0); - if (!arcmsr_hba_wait_msgint_ready(acb)) { + if (!arcmsr_hbaA_wait_msgint_ready(acb)) { printk(KERN_NOTICE - "arcmsr%d: wait 'abort all outstanding command' timeout \n" + "arcmsr%d: wait 'abort all outstanding command' timeout\n" , acb->host->host_no); return false; } return true; } -static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) +static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell); - if (!arcmsr_hbb_wait_msgint_ready(acb)) { + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { printk(KERN_NOTICE - "arcmsr%d: wait 'abort all outstanding command' timeout \n" + "arcmsr%d: wait 'abort all outstanding command' timeout\n" , acb->host->host_no); return false; } return true; } -static uint8_t arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *pACB) +static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB) { struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC; writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); - if (!arcmsr_hbc_wait_msgint_ready(pACB)) { + if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { printk(KERN_NOTICE - "arcmsr%d: wait 'abort all outstanding command' timeout \n" + "arcmsr%d: wait 'abort all outstanding command' timeout\n" , pACB->host->host_no); return false; } @@ -842,17 +844,17 @@ static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb) uint8_t rtnval = 0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { - rtnval = arcmsr_abort_hba_allcmd(acb); + rtnval = arcmsr_hbaA_abort_allcmd(acb); } break; case ACB_ADAPTER_TYPE_B: { - rtnval = arcmsr_abort_hbb_allcmd(acb); + rtnval = arcmsr_hbaB_abort_allcmd(acb); } break; case ACB_ADAPTER_TYPE_C: { - rtnval = arcmsr_abort_hbc_allcmd(acb); + rtnval = arcmsr_hbaC_abort_allcmd(acb); } } return rtnval; @@ -1109,7 +1111,7 @@ static void arcmsr_remove(struct pci_dev *pdev) } arcmsr_free_irq(pdev, acb); arcmsr_free_ccb_pool(acb); - arcmsr_free_hbb_mu(acb); + arcmsr_free_mu(acb); arcmsr_unmap_pciregion(acb); pci_release_regions(pdev); scsi_host_put(host); @@ -1194,7 +1196,7 @@ static int arcmsr_build_ccb(struct AdapterControlBlock *acb, arcmsr_cdb->TargetID = pcmd->device->id; arcmsr_cdb->LUN = pcmd->device->lun; arcmsr_cdb->Function = 1; - arcmsr_cdb->Context = 0; + arcmsr_cdb->msgContext = 0; memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len); nseg = scsi_dma_map(pcmd); @@ -1235,7 +1237,7 @@ static int arcmsr_build_ccb(struct AdapterControlBlock *acb, static void 
arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb) { - uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern; + uint32_t cdb_phyaddr = ccb->cdb_phyaddr; struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; atomic_inc(&acb->ccboutstandingcount); ccb->startdone = ARCMSR_CCB_START; @@ -1244,13 +1246,12 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr struct MessageUnit_A __iomem *reg = acb->pmuA; if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) - writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE, + writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE, ®->inbound_queueport); - else { - writel(cdb_phyaddr_pattern, ®->inbound_queueport); - } - } + else + writel(cdb_phyaddr, ®->inbound_queueport); break; + } case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; @@ -1259,10 +1260,10 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE); writel(0, ®->post_qbuffer[ending_index]); if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { - writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\ + writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE, ®->post_qbuffer[index]); } else { - writel(cdb_phyaddr_pattern, ®->post_qbuffer[index]); + writel(cdb_phyaddr, ®->post_qbuffer[index]); } index++; index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */ @@ -1275,7 +1276,7 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr uint32_t ccb_post_stamp, arc_cdb_size; arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size; - ccb_post_stamp = (cdb_phyaddr_pattern | ((arc_cdb_size - 1) >> 6) | 1); + ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1); if (acb->cdb_phyaddr_hi32) { writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high); writel(ccb_post_stamp, &phbcmu->inbound_queueport_low); @@ -1286,40 +1287,40 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr } } -static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb) +static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; acb->acb_flags &= ~ACB_F_MSG_START_BGRB; writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); - if (!arcmsr_hba_wait_msgint_ready(acb)) { + if (!arcmsr_hbaA_wait_msgint_ready(acb)) { printk(KERN_NOTICE - "arcmsr%d: wait 'stop adapter background rebulid' timeout \n" + "arcmsr%d: wait 'stop adapter background rebulid' timeout\n" , acb->host->host_no); } } -static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb) +static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; acb->acb_flags &= ~ACB_F_MSG_START_BGRB; writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell); - if (!arcmsr_hbb_wait_msgint_ready(acb)) { + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { printk(KERN_NOTICE - "arcmsr%d: wait 'stop adapter background rebulid' timeout \n" + "arcmsr%d: wait 'stop adapter background rebulid' timeout\n" , acb->host->host_no); } } -static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *pACB) +static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB) { struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC; pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); - if 
(!arcmsr_hbc_wait_msgint_ready(pACB)) { + if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { printk(KERN_NOTICE - "arcmsr%d: wait 'stop adapter background rebulid' timeout \n" + "arcmsr%d: wait 'stop adapter background rebulid' timeout\n" , pACB->host->host_no); } return; @@ -1328,16 +1329,16 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { - arcmsr_stop_hba_bgrb(acb); + arcmsr_hbaA_stop_bgrb(acb); } break; case ACB_ADAPTER_TYPE_B: { - arcmsr_stop_hbb_bgrb(acb); + arcmsr_hbaB_stop_bgrb(acb); } break; case ACB_ADAPTER_TYPE_C: { - arcmsr_stop_hbc_bgrb(acb); + arcmsr_hbaC_stop_bgrb(acb); } } } @@ -1515,7 +1516,7 @@ static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) } } -static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) +static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb) { uint32_t outbound_doorbell; struct MessageUnit_A __iomem *reg = acb->pmuA; @@ -1530,7 +1531,7 @@ static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) } while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK | ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)); } -static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *pACB) +static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB) { uint32_t outbound_doorbell; struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC; @@ -1550,13 +1551,13 @@ static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *pACB) if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) arcmsr_iop2drv_data_read_handle(pACB); if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) - arcmsr_hbc_message_isr(pACB); + arcmsr_hbaC_message_isr(pACB); outbound_doorbell = readl(®->outbound_doorbell); } while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK | ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK | ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)); } -static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) +static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb) { uint32_t flag_ccb; struct MessageUnit_A __iomem *reg = acb->pmuA; @@ -1570,7 +1571,7 @@ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) arcmsr_drain_donequeue(acb, pCCB, error); } } -static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) +static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb) { uint32_t index; uint32_t flag_ccb; @@ -1591,7 +1592,7 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) } } -static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb) +static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb) { struct MessageUnit_C *phbcmu; struct ARCMSR_CDB *arcmsr_cdb; @@ -1630,14 +1631,14 @@ static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb) ** We want this in order to compare the drivemap so that we can detect newly-attached drives. 
********************************************************************************** */ -static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) +static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb) { struct MessageUnit_A *reg = acb->pmuA; /*clear interrupt and message state*/ writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, ®->outbound_intstatus); schedule_work(&acb->arcmsr_do_message_isr_bh); } -static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) +static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; @@ -1654,7 +1655,7 @@ static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) ** We want this in order to compare the drivemap so that we can detect newly-attached drives. ********************************************************************************** */ -static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) +static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb) { struct MessageUnit_C *reg = acb->pmuC; /*clear interrupt and message state*/ @@ -1662,7 +1663,7 @@ static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) schedule_work(&acb->arcmsr_do_message_isr_bh); } -static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) +static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb) { uint32_t outbound_intstatus; struct MessageUnit_A __iomem *reg = acb->pmuA; @@ -1673,11 +1674,11 @@ static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) do { writel(outbound_intstatus, ®->outbound_intstatus); if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) - arcmsr_hba_doorbell_isr(acb); + arcmsr_hbaA_doorbell_isr(acb); if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) - arcmsr_hba_postqueue_isr(acb); + arcmsr_hbaA_postqueue_isr(acb); if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) - arcmsr_hba_message_isr(acb); + arcmsr_hbaA_message_isr(acb); outbound_intstatus = readl(®->outbound_intstatus) & acb->outbound_int_enable; } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT @@ -1686,7 +1687,7 @@ static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) return IRQ_HANDLED; } -static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) +static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb) { uint32_t outbound_doorbell; struct MessageUnit_B *reg = acb->pmuB; @@ -1702,9 +1703,9 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) arcmsr_iop2drv_data_read_handle(acb); if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) - arcmsr_hbb_postqueue_isr(acb); + arcmsr_hbaB_postqueue_isr(acb); if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) - arcmsr_hbb_message_isr(acb); + arcmsr_hbaB_message_isr(acb); outbound_doorbell = readl(reg->iop2drv_doorbell) & acb->outbound_int_enable; } while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK @@ -1714,7 +1715,7 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) return IRQ_HANDLED; } -static int arcmsr_handle_hbc_isr(struct AdapterControlBlock *pACB) +static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB) { uint32_t host_interrupt_status; struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC; @@ -1730,10 +1731,10 @@ static int arcmsr_handle_hbc_isr(struct AdapterControlBlock *pACB) return IRQ_NONE; do { if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) - arcmsr_hbc_doorbell_isr(pACB); + arcmsr_hbaC_doorbell_isr(pACB); /* 
MU post queue interrupts*/ if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) - arcmsr_hbc_postqueue_isr(pACB); + arcmsr_hbaC_postqueue_isr(pACB); host_interrupt_status = readl(&phbcmu->host_int_status); } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)); @@ -1743,13 +1744,13 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: - return arcmsr_handle_hba_isr(acb); + return arcmsr_hbaA_handle_isr(acb); break; case ACB_ADAPTER_TYPE_B: - return arcmsr_handle_hbb_isr(acb); + return arcmsr_hbaB_handle_isr(acb); break; case ACB_ADAPTER_TYPE_C: - return arcmsr_handle_hbc_isr(acb); + return arcmsr_hbaC_handle_isr(acb); default: return IRQ_NONE; } @@ -2168,7 +2169,7 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd, static DEF_SCSI_QCMD(arcmsr_queue_command) -static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb) +static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; char *acb_firm_model = acb->firm_model; @@ -2179,7 +2180,7 @@ static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb) char __iomem *iop_device_map = (char __iomem *)(®->message_rwbuffer[21]); int count; writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); - if (!arcmsr_hba_wait_msgint_ready(acb)) { + if (!arcmsr_hbaA_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ miscellaneous data' timeout \n", acb->host->host_no); return false; @@ -2219,7 +2220,7 @@ static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb) acb->firm_cfg_version = readl(®->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/ return true; } -static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb) +static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; struct pci_dev *pdev = acb->pdev; @@ -2240,7 +2241,7 @@ static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb) printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error for hbb mu\n", acb->host->host_no); return false; } - acb->dma_coherent_handle_hbb_mu = dma_coherent_handle; + acb->dma_coherent_handle2 = dma_coherent_handle; reg = (struct MessageUnit_B *)dma_coherent; acb->pmuB = reg; reg->drv2iop_doorbell= (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL); @@ -2255,7 +2256,7 @@ static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb) iop_device_map = (char __iomem *)(®->message_rwbuffer[21]); /*firm_version,21,84-99*/ writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell); - if (!arcmsr_hbb_wait_msgint_ready(acb)) { + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ miscellaneous data' timeout \n", acb->host->host_no); return false; @@ -2303,7 +2304,7 @@ static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb) return true; } -static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB) +static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB) { uint32_t intmask_org, Index, firmware_state = 0; struct MessageUnit_C *reg = pACB->pmuC; @@ -2367,13 +2368,13 @@ static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: - rtn = arcmsr_get_hba_config(acb); + rtn = arcmsr_hbaA_get_config(acb); break; case ACB_ADAPTER_TYPE_B: - rtn = arcmsr_get_hbb_config(acb); + rtn = 
arcmsr_hbaB_get_config(acb); break; case ACB_ADAPTER_TYPE_C: - rtn = arcmsr_get_hbc_config(acb); + rtn = arcmsr_hbaC_get_config(acb); break; default: break; @@ -2386,7 +2387,7 @@ static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) return rtn; } -static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, +static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb) { struct MessageUnit_A __iomem *reg = acb->pmuA; @@ -2442,7 +2443,7 @@ static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, return rtn; } -static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, +static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb) { struct MessageUnit_B *reg = acb->pmuB; @@ -2506,7 +2507,8 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, return rtn; } -static int arcmsr_polling_hbc_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb) +static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb, + struct CommandControlBlock *poll_ccb) { struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; uint32_t flag_ccb, ccb_cdb_phy; @@ -2569,16 +2571,16 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { - rtn = arcmsr_polling_hba_ccbdone(acb, poll_ccb); + rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb); } break; case ACB_ADAPTER_TYPE_B: { - rtn = arcmsr_polling_hbb_ccbdone(acb, poll_ccb); + rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb); } break; case ACB_ADAPTER_TYPE_C: { - rtn = arcmsr_polling_hbc_ccbdone(acb, poll_ccb); + rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb); } } return rtn; @@ -2612,7 +2614,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) writel(cdb_phyaddr_hi32, ®->message_rwbuffer[1]); writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \ ®->inbound_msgaddr0); - if (!arcmsr_hba_wait_msgint_ready(acb)) { + if (!arcmsr_hbaA_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: ""set ccb high \ part physical address timeout\n", acb->host->host_no); @@ -2630,12 +2632,12 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) reg->postq_index = 0; reg->doneq_index = 0; writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell); - if (!arcmsr_hbb_wait_msgint_ready(acb)) { + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \ acb->host->host_no); return 1; } - post_queue_phyaddr = acb->dma_coherent_handle_hbb_mu; + post_queue_phyaddr = acb->dma_coherent_handle2; rwbuffer = reg->message_rwbuffer; /* driver "set config" signature */ writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++); @@ -2649,13 +2651,13 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) writel(1056, rwbuffer); writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell); - if (!arcmsr_hbb_wait_msgint_ready(acb)) { + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \ timeout \n",acb->host->host_no); return 1; } writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell); - if (!arcmsr_hbb_wait_msgint_ready(acb)) { + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { pr_err("arcmsr%d: can't set driver mode.\n", acb->host->host_no); return 1; @@ -2672,7 +2674,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) writel(cdb_phyaddr_hi32, ®->msgcode_rwbuffer[1]); writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, 
®->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); - if (!arcmsr_hbc_wait_msgint_ready(acb)) { + if (!arcmsr_hbaC_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \ timeout \n", acb->host->host_no); return 1; @@ -2713,7 +2715,7 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) } } -static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb) +static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){ @@ -2735,7 +2737,7 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb) return; } -static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb) +static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb) { struct MessageUnit_B __iomem *reg = acb->pmuB; if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){ @@ -2757,7 +2759,7 @@ static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb) return; } -static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb) +static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb) { struct MessageUnit_C __iomem *reg = acb->pmuC; if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) { @@ -2785,48 +2787,48 @@ static void arcmsr_request_device_map(unsigned long pacb) struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { - arcmsr_request_hba_device_map(acb); + arcmsr_hbaA_request_device_map(acb); } break; case ACB_ADAPTER_TYPE_B: { - arcmsr_request_hbb_device_map(acb); + arcmsr_hbaB_request_device_map(acb); } break; case ACB_ADAPTER_TYPE_C: { - arcmsr_request_hbc_device_map(acb); + arcmsr_hbaC_request_device_map(acb); } } } -static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) +static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; acb->acb_flags |= ACB_F_MSG_START_BGRB; writel(ARCMSR_INBOUND_MESG0_START_BGRB, ®->inbound_msgaddr0); - if (!arcmsr_hba_wait_msgint_ready(acb)) { + if (!arcmsr_hbaA_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ rebulid' timeout \n", acb->host->host_no); } } -static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb) +static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; acb->acb_flags |= ACB_F_MSG_START_BGRB; writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell); - if (!arcmsr_hbb_wait_msgint_ready(acb)) { + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ rebulid' timeout \n",acb->host->host_no); } } -static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *pACB) +static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB) { struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC; pACB->acb_flags |= ACB_F_MSG_START_BGRB; writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell); - if (!arcmsr_hbc_wait_msgint_ready(pACB)) { + if 
(!arcmsr_hbaC_wait_msgint_ready(pACB)) { printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ rebulid' timeout \n", pACB->host->host_no); } @@ -2836,13 +2838,13 @@ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: - arcmsr_start_hba_bgrb(acb); + arcmsr_hbaA_start_bgrb(acb); break; case ACB_ADAPTER_TYPE_B: - arcmsr_start_hbb_bgrb(acb); + arcmsr_hbaB_start_bgrb(acb); break; case ACB_ADAPTER_TYPE_C: - arcmsr_start_hbc_bgrb(acb); + arcmsr_hbaC_start_bgrb(acb); } } @@ -2900,7 +2902,7 @@ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell); - if (!arcmsr_hbb_wait_msgint_ready(acb)) { + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT"); return; } -- cgit From 6e38adfc58406e7ea6f6701c49abaf046ce076a8 Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 15:14:14 +0800 Subject: arcmsr: revise allocation of second dma_coherent_handle for type B This modification is for consistency with upcoming adapter type D. Both adapter type B and D have similar H/W and S/W structure. Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr.h | 2 ++ drivers/scsi/arcmsr/arcmsr_hba.c | 38 ++++++++++++++++++++++++-------------- 2 files changed, 26 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index 6d616beca439..83c0a7dbfef3 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -507,6 +507,7 @@ struct AdapterControlBlock #define ACB_ADAPTER_TYPE_B 0x00000002 /* hbb M IOP */ #define ACB_ADAPTER_TYPE_C 0x00000004 /* hbc P IOP */ #define ACB_ADAPTER_TYPE_D 0x00000008 /* hbd A IOP */ + u32 roundup_ccbsize; struct pci_dev * pdev; struct Scsi_Host * host; unsigned long vir2phy_offset; @@ -563,6 +564,7 @@ struct AdapterControlBlock dma_addr_t dma_coherent_handle; /* dma_coherent_handle used for memory free */ dma_addr_t dma_coherent_handle2; + void *dma_coherent2; unsigned int uncache_size; uint8_t rqbuffer[ARCMSR_MAX_QBUFFER]; /* data collection buffer for read from 80331 */ diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 725332364092..fc0dfbc70feb 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -204,13 +204,10 @@ static struct pci_driver arcmsr_pci_driver = { static void arcmsr_free_mu(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { - case ACB_ADAPTER_TYPE_A: - case ACB_ADAPTER_TYPE_C: - break; case ACB_ADAPTER_TYPE_B:{ - dma_free_coherent(&acb->pdev->dev, - sizeof(struct MessageUnit_B), - acb->pmuB, acb->dma_coherent_handle2); + dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize, + acb->dma_coherent2, acb->dma_coherent_handle2); + break; } } } @@ -2236,12 +2233,18 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb) char __iomem *iop_device_map; /*firm_version,21,84-99*/ int count; - dma_coherent = dma_alloc_coherent(&pdev->dev, sizeof(struct MessageUnit_B), &dma_coherent_handle, GFP_KERNEL); + + acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32); + dma_coherent = dma_alloc_coherent(&pdev->dev, acb->roundup_ccbsize, + &dma_coherent_handle, GFP_KERNEL); if (!dma_coherent){ - printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error for hbb mu\n", acb->host->host_no); + printk(KERN_NOTICE 
+ "arcmsr%d: dma_alloc_coherent got error for hbb mu\n", + acb->host->host_no); return false; } acb->dma_coherent_handle2 = dma_coherent_handle; + acb->dma_coherent2 = dma_coherent; reg = (struct MessageUnit_B *)dma_coherent; acb->pmuB = reg; reg->drv2iop_doorbell= (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL); @@ -2589,6 +2592,7 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) { uint32_t cdb_phyaddr, cdb_phyaddr_hi32; + dma_addr_t dma_coherent_handle; /* ******************************************************************** @@ -2596,8 +2600,16 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) ** if freeccb.HighPart is not zero ******************************************************************** */ - cdb_phyaddr = lower_32_bits(acb->dma_coherent_handle); - cdb_phyaddr_hi32 = upper_32_bits(acb->dma_coherent_handle); + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_B: + dma_coherent_handle = acb->dma_coherent_handle2; + break; + default: + dma_coherent_handle = acb->dma_coherent_handle; + break; + } + cdb_phyaddr = lower_32_bits(dma_coherent_handle); + cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle); acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32; /* *********************************************************************** @@ -2625,7 +2637,6 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) break; case ACB_ADAPTER_TYPE_B: { - unsigned long post_queue_phyaddr; uint32_t __iomem *rwbuffer; struct MessageUnit_B *reg = acb->pmuB; @@ -2637,16 +2648,15 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) acb->host->host_no); return 1; } - post_queue_phyaddr = acb->dma_coherent_handle2; rwbuffer = reg->message_rwbuffer; /* driver "set config" signature */ writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++); /* normal should be zero */ writel(cdb_phyaddr_hi32, rwbuffer++); /* postQ size (256 + 8)*4 */ - writel(post_queue_phyaddr, rwbuffer++); + writel(cdb_phyaddr, rwbuffer++); /* doneQ size (256 + 8)*4 */ - writel(post_queue_phyaddr + 1056, rwbuffer++); + writel(cdb_phyaddr + 1056, rwbuffer++); /* ccb maxQ size must be --> [(256 + 8)*4]*/ writel(1056, rwbuffer); -- cgit From bb263c4ecbb186fe394c6c9acc32d8c59b6a7bdd Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 15:17:45 +0800 Subject: arcmsr: fix ioctl data read/write error for adapter type C Rewrite ioctl entry and its relate function. This patch fix ioctl data read/write error and change data I/O access from byte to Dword. 
Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr.h | 8 +- drivers/scsi/arcmsr/arcmsr_attr.c | 101 +++++-- drivers/scsi/arcmsr/arcmsr_hba.c | 572 ++++++++++++++++++++++++-------------- 3 files changed, 442 insertions(+), 239 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index 83c0a7dbfef3..799393ede1cc 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -518,6 +518,8 @@ struct AdapterControlBlock uint32_t reg_mu_acc_handle0; spinlock_t eh_lock; spinlock_t ccblist_lock; + spinlock_t rqbuffer_lock; + spinlock_t wqbuffer_lock; union { struct MessageUnit_A __iomem *pmuA; struct MessageUnit_B *pmuB; @@ -693,8 +695,10 @@ struct SENSE_DATA #define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01 #define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F -extern void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *); -extern void arcmsr_iop_message_read(struct AdapterControlBlock *); +extern void arcmsr_write_ioctldata2iop(struct AdapterControlBlock *); +extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *, + struct QBUFFER __iomem *); +extern void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *); extern struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *); extern struct device_attribute *arcmsr_host_attrs[]; extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *); diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c index acdae33de521..16422adcc531 100644 --- a/drivers/scsi/arcmsr/arcmsr_attr.c +++ b/drivers/scsi/arcmsr/arcmsr_attr.c @@ -70,40 +70,75 @@ static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp, struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; uint8_t *pQbuffer,*ptmpQbuffer; int32_t allxfer_len = 0; + unsigned long flags; if (!capable(CAP_SYS_ADMIN)) return -EACCES; /* do message unit read. 
*/ ptmpQbuffer = (uint8_t *)buf; - while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) - && (allxfer_len < 1031)) { + spin_lock_irqsave(&acb->rqbuffer_lock, flags); + if (acb->rqbuf_firstindex != acb->rqbuf_lastindex) { pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; - memcpy(ptmpQbuffer, pQbuffer, 1); - acb->rqbuf_firstindex++; - acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; - ptmpQbuffer++; - allxfer_len++; + if (acb->rqbuf_firstindex > acb->rqbuf_lastindex) { + if ((ARCMSR_MAX_QBUFFER - acb->rqbuf_firstindex) >= 1032) { + memcpy(ptmpQbuffer, pQbuffer, 1032); + acb->rqbuf_firstindex += 1032; + acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; + allxfer_len = 1032; + } else { + if (((ARCMSR_MAX_QBUFFER - acb->rqbuf_firstindex) + + acb->rqbuf_lastindex) > 1032) { + memcpy(ptmpQbuffer, pQbuffer, + ARCMSR_MAX_QBUFFER + - acb->rqbuf_firstindex); + ptmpQbuffer += ARCMSR_MAX_QBUFFER + - acb->rqbuf_firstindex; + memcpy(ptmpQbuffer, acb->rqbuffer, 1032 + - (ARCMSR_MAX_QBUFFER - + acb->rqbuf_firstindex)); + acb->rqbuf_firstindex = 1032 - + (ARCMSR_MAX_QBUFFER - + acb->rqbuf_firstindex); + allxfer_len = 1032; + } else { + memcpy(ptmpQbuffer, pQbuffer, + ARCMSR_MAX_QBUFFER - + acb->rqbuf_firstindex); + ptmpQbuffer += ARCMSR_MAX_QBUFFER - + acb->rqbuf_firstindex; + memcpy(ptmpQbuffer, acb->rqbuffer, + acb->rqbuf_lastindex); + allxfer_len = ARCMSR_MAX_QBUFFER - + acb->rqbuf_firstindex + + acb->rqbuf_lastindex; + acb->rqbuf_firstindex = + acb->rqbuf_lastindex; + } + } + } else { + if ((acb->rqbuf_lastindex - acb->rqbuf_firstindex) > 1032) { + memcpy(ptmpQbuffer, pQbuffer, 1032); + acb->rqbuf_firstindex += 1032; + allxfer_len = 1032; + } else { + memcpy(ptmpQbuffer, pQbuffer, acb->rqbuf_lastindex + - acb->rqbuf_firstindex); + allxfer_len = acb->rqbuf_lastindex - + acb->rqbuf_firstindex; + acb->rqbuf_firstindex = acb->rqbuf_lastindex; + } + } } if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { struct QBUFFER __iomem *prbuffer; - uint8_t __iomem *iop_data; - int32_t iop_len; - acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; prbuffer = arcmsr_get_iop_rqbuffer(acb); - iop_data = prbuffer->data; - iop_len = readl(&prbuffer->data_len); - while (iop_len > 0) { - acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data); - acb->rqbuf_lastindex++; - acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; - iop_data++; - iop_len--; - } - arcmsr_iop_message_read(acb); + if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) + acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } - return (allxfer_len); + spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); + return allxfer_len; } static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp, @@ -117,6 +152,7 @@ static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp, struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; uint8_t *pQbuffer, *ptmpuserbuffer; + unsigned long flags; if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -125,18 +161,19 @@ static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp, /* do message unit write. 
*/ ptmpuserbuffer = (uint8_t *)buf; user_len = (int32_t)count; + spin_lock_irqsave(&acb->wqbuffer_lock, flags); wqbuf_lastindex = acb->wqbuf_lastindex; wqbuf_firstindex = acb->wqbuf_firstindex; if (wqbuf_lastindex != wqbuf_firstindex) { - arcmsr_post_ioctldata2iop(acb); + arcmsr_write_ioctldata2iop(acb); + spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); return 0; /*need retry*/ } else { my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) - &(ARCMSR_MAX_QBUFFER - 1); + &(ARCMSR_MAX_QBUFFER - 1); if (my_empty_len >= user_len) { while (user_len > 0) { - pQbuffer = - &acb->wqbuffer[acb->wqbuf_lastindex]; + pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; memcpy(pQbuffer, ptmpuserbuffer, 1); acb->wqbuf_lastindex++; acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; @@ -146,10 +183,12 @@ static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp, if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED; - arcmsr_post_ioctldata2iop(acb); + arcmsr_write_ioctldata2iop(acb); } + spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); return count; } else { + spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); return 0; /*need retry*/ } } @@ -165,22 +204,24 @@ static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp, struct Scsi_Host *host = class_to_shost(dev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; uint8_t *pQbuffer; + unsigned long flags; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { - acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; - arcmsr_iop_message_read(acb); - } + arcmsr_clear_iop2drv_rqueue_buffer(acb); acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READED); + spin_lock_irqsave(&acb->rqbuffer_lock, flags); acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; + spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); + spin_lock_irqsave(&acb->wqbuffer_lock, flags); acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; + spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); pQbuffer = acb->rqbuffer; memset(pQbuffer, 0, sizeof (struct QBUFFER)); pQbuffer = acb->wqbuffer; diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index fc0dfbc70feb..1576805efc7d 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -653,6 +653,8 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) } spin_lock_init(&acb->eh_lock); spin_lock_init(&acb->ccblist_lock); + spin_lock_init(&acb->rqbuffer_lock); + spin_lock_init(&acb->wqbuffer_lock); acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READED); @@ -1449,68 +1451,175 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc return pqbuffer; } -static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) +static uint32_t +arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb, + struct QBUFFER __iomem *prbuffer) { - struct QBUFFER __iomem *prbuffer; - struct QBUFFER *pQbuffer; - uint8_t __iomem *iop_data; - int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex; - rqbuf_lastindex = acb->rqbuf_lastindex; - rqbuf_firstindex = acb->rqbuf_firstindex; - prbuffer = arcmsr_get_iop_rqbuffer(acb); - iop_data = (uint8_t __iomem *)prbuffer->data; - iop_len = prbuffer->data_len; - my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1); - - if 
(my_empty_len >= iop_len) - { - while (iop_len > 0) { - pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex]; - memcpy(pQbuffer, iop_data, 1); - rqbuf_lastindex++; - rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; + uint8_t *pQbuffer; + uint8_t *buf1 = NULL; + uint32_t __iomem *iop_data; + uint32_t iop_len, data_len, *buf2 = NULL; + + iop_data = (uint32_t __iomem *)prbuffer->data; + iop_len = readl(&prbuffer->data_len); + if (iop_len > 0) { + buf1 = kmalloc(128, GFP_ATOMIC); + buf2 = (uint32_t *)buf1; + if (buf1 == NULL) + return 0; + data_len = iop_len; + while (data_len >= 4) { + *buf2++ = readl(iop_data); iop_data++; - iop_len--; + data_len -= 4; } - acb->rqbuf_lastindex = rqbuf_lastindex; - arcmsr_iop_message_read(acb); + if (data_len) + *buf2 = readl(iop_data); + buf2 = (uint32_t *)buf1; + } + while (iop_len > 0) { + pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex]; + *pQbuffer = *buf1; + acb->rqbuf_lastindex++; + /* if last, index number set it to 0 */ + acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; + buf1++; + iop_len--; + } + if (buf2) + kfree(buf2); + /* let IOP know data has been read */ + arcmsr_iop_message_read(acb); + return 1; +} + +uint32_t +arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, + struct QBUFFER __iomem *prbuffer) { + + uint8_t *pQbuffer; + uint8_t __iomem *iop_data; + uint32_t iop_len; + + if (acb->adapter_type & ACB_ADAPTER_TYPE_C) + return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer); + iop_data = (uint8_t __iomem *)prbuffer->data; + iop_len = readl(&prbuffer->data_len); + while (iop_len > 0) { + pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex]; + *pQbuffer = readb(iop_data); + acb->rqbuf_lastindex++; + acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; + iop_data++; + iop_len--; } + arcmsr_iop_message_read(acb); + return 1; +} - else { +static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) +{ + unsigned long flags; + struct QBUFFER __iomem *prbuffer; + int32_t buf_empty_len; + + spin_lock_irqsave(&acb->rqbuffer_lock, flags); + prbuffer = arcmsr_get_iop_rqbuffer(acb); + buf_empty_len = (acb->rqbuf_lastindex - acb->rqbuf_firstindex - 1) & + (ARCMSR_MAX_QBUFFER - 1); + if (buf_empty_len >= readl(&prbuffer->data_len)) { + if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) + acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; + } else acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; + spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); +} + +static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb) +{ + uint8_t *pQbuffer; + struct QBUFFER __iomem *pwbuffer; + uint8_t *buf1 = NULL; + uint32_t __iomem *iop_data; + uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data; + + if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { + buf1 = kmalloc(128, GFP_ATOMIC); + buf2 = (uint32_t *)buf1; + if (buf1 == NULL) + return; + + acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); + pwbuffer = arcmsr_get_iop_wqbuffer(acb); + iop_data = (uint32_t __iomem *)pwbuffer->data; + while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) + && (allxfer_len < 124)) { + pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex]; + *buf1 = *pQbuffer; + acb->wqbuf_firstindex++; + acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; + buf1++; + allxfer_len++; + } + data_len = allxfer_len; + buf1 = (uint8_t *)buf2; + while (data_len >= 4) { + data = *buf2++; + writel(data, iop_data); + iop_data++; + data_len -= 4; + } + if (data_len) { + data = *buf2; + writel(data, iop_data); + } + writel(allxfer_len, &pwbuffer->data_len); + kfree(buf1); + arcmsr_iop_message_wrote(acb); } 
} -static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) +void +arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb) { - acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; - if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) { - uint8_t *pQbuffer; - struct QBUFFER __iomem *pwbuffer; - uint8_t __iomem *iop_data; - int32_t allxfer_len = 0; + uint8_t *pQbuffer; + struct QBUFFER __iomem *pwbuffer; + uint8_t __iomem *iop_data; + int32_t allxfer_len = 0; + if (acb->adapter_type & ACB_ADAPTER_TYPE_C) { + arcmsr_write_ioctldata2iop_in_DWORD(acb); + return; + } + if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); pwbuffer = arcmsr_get_iop_wqbuffer(acb); iop_data = (uint8_t __iomem *)pwbuffer->data; - - while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && \ - (allxfer_len < 124)) { + while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) + && (allxfer_len < 124)) { pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex]; - memcpy(iop_data, pQbuffer, 1); + writeb(*pQbuffer, iop_data); acb->wqbuf_firstindex++; acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; iop_data++; allxfer_len++; } - pwbuffer->data_len = allxfer_len; - + writel(allxfer_len, &pwbuffer->data_len); arcmsr_iop_message_wrote(acb); } +} + +static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) +{ + unsigned long flags; - if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) { + spin_lock_irqsave(&acb->wqbuffer_lock, flags); + acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; + if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) + arcmsr_write_ioctldata2iop(acb); + if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; - } + spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); } static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb) @@ -1768,296 +1877,345 @@ static void arcmsr_iop_parking(struct AdapterControlBlock *acb) } } -void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb) + +void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb) { - int32_t wqbuf_firstindex, wqbuf_lastindex; - uint8_t *pQbuffer; - struct QBUFFER __iomem *pwbuffer; - uint8_t __iomem *iop_data; - int32_t allxfer_len = 0; - pwbuffer = arcmsr_get_iop_wqbuffer(acb); - iop_data = (uint8_t __iomem *)pwbuffer->data; - if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { - acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); - wqbuf_firstindex = acb->wqbuf_firstindex; - wqbuf_lastindex = acb->wqbuf_lastindex; - while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) { - pQbuffer = &acb->wqbuffer[wqbuf_firstindex]; - memcpy(iop_data, pQbuffer, 1); - wqbuf_firstindex++; - wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; - iop_data++; - allxfer_len++; + uint32_t i; + + if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { + for (i = 0; i < 15; i++) { + if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { + acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; + acb->rqbuf_firstindex = 0; + acb->rqbuf_lastindex = 0; + arcmsr_iop_message_read(acb); + mdelay(30); + } else if (acb->rqbuf_firstindex != + acb->rqbuf_lastindex) { + acb->rqbuf_firstindex = 0; + acb->rqbuf_lastindex = 0; + mdelay(30); + } else + break; } - acb->wqbuf_firstindex = wqbuf_firstindex; - pwbuffer->data_len = allxfer_len; - arcmsr_iop_message_wrote(acb); } } static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, - struct scsi_cmnd *cmd) + struct scsi_cmnd *cmd) { - struct CMD_MESSAGE_FIELD *pcmdmessagefld; - int retvalue = 0, 
transfer_len = 0; char *buffer; + unsigned short use_sg; + int retvalue = 0, transfer_len = 0; + unsigned long flags; + struct CMD_MESSAGE_FIELD *pcmdmessagefld; + uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 | + (uint32_t)cmd->cmnd[6] << 16 | + (uint32_t)cmd->cmnd[7] << 8 | + (uint32_t)cmd->cmnd[8]; struct scatterlist *sg; - uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 | - (uint32_t ) cmd->cmnd[6] << 16 | - (uint32_t ) cmd->cmnd[7] << 8 | - (uint32_t ) cmd->cmnd[8]; - /* 4 bytes: Areca io control code */ + + use_sg = scsi_sg_count(cmd); sg = scsi_sglist(cmd); buffer = kmap_atomic(sg_page(sg)) + sg->offset; - if (scsi_sg_count(cmd) > 1) { + if (use_sg > 1) { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } transfer_len += sg->length; - if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { retvalue = ARCMSR_MESSAGE_FAIL; + pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__); goto message_out; } - pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer; - switch(controlcode) { - + pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer; + switch (controlcode) { case ARCMSR_MESSAGE_READ_RQBUFFER: { unsigned char *ver_addr; uint8_t *pQbuffer, *ptmpQbuffer; - int32_t allxfer_len = 0; - + uint32_t allxfer_len = 0; ver_addr = kmalloc(1032, GFP_ATOMIC); if (!ver_addr) { retvalue = ARCMSR_MESSAGE_FAIL; + pr_info("%s: memory not enough!\n", __func__); goto message_out; } - ptmpQbuffer = ver_addr; - while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) - && (allxfer_len < 1031)) { + spin_lock_irqsave(&acb->rqbuffer_lock, flags); + if (acb->rqbuf_firstindex != acb->rqbuf_lastindex) { pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; - memcpy(ptmpQbuffer, pQbuffer, 1); - acb->rqbuf_firstindex++; - acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; - ptmpQbuffer++; - allxfer_len++; + if (acb->rqbuf_firstindex > acb->rqbuf_lastindex) { + if ((ARCMSR_MAX_QBUFFER - + acb->rqbuf_firstindex) >= 1032) { + memcpy(ptmpQbuffer, pQbuffer, 1032); + acb->rqbuf_firstindex += 1032; + acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; + allxfer_len = 1032; + } else { + if (((ARCMSR_MAX_QBUFFER - + acb->rqbuf_firstindex) + + acb->rqbuf_lastindex) > 1032) { + memcpy(ptmpQbuffer, + pQbuffer, ARCMSR_MAX_QBUFFER + - acb->rqbuf_firstindex); + ptmpQbuffer += + ARCMSR_MAX_QBUFFER - + acb->rqbuf_firstindex; + memcpy(ptmpQbuffer, + acb->rqbuffer, 1032 - + (ARCMSR_MAX_QBUFFER + - acb->rqbuf_firstindex)); + acb->rqbuf_firstindex = + 1032 - (ARCMSR_MAX_QBUFFER + - acb->rqbuf_firstindex); + allxfer_len = 1032; + } else { + memcpy(ptmpQbuffer, + pQbuffer, ARCMSR_MAX_QBUFFER + - acb->rqbuf_firstindex); + ptmpQbuffer += + ARCMSR_MAX_QBUFFER - + acb->rqbuf_firstindex; + memcpy(ptmpQbuffer, + acb->rqbuffer, + acb->rqbuf_lastindex); + allxfer_len = ARCMSR_MAX_QBUFFER + - acb->rqbuf_firstindex + + acb->rqbuf_lastindex; + acb->rqbuf_firstindex = + acb->rqbuf_lastindex; + } + } + } else { + if ((acb->rqbuf_lastindex - + acb->rqbuf_firstindex) > 1032) { + memcpy(ptmpQbuffer, pQbuffer, 1032); + acb->rqbuf_firstindex += 1032; + allxfer_len = 1032; + } else { + memcpy(ptmpQbuffer, pQbuffer, + acb->rqbuf_lastindex - + acb->rqbuf_firstindex); + allxfer_len = acb->rqbuf_lastindex + - acb->rqbuf_firstindex; + acb->rqbuf_firstindex = + acb->rqbuf_lastindex; + } + } } + memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, + allxfer_len); if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { - struct QBUFFER __iomem *prbuffer; - uint8_t __iomem *iop_data; - int32_t iop_len; - acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; prbuffer = arcmsr_get_iop_rqbuffer(acb); - 
iop_data = prbuffer->data; - iop_len = readl(&prbuffer->data_len); - while (iop_len > 0) { - acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data); - acb->rqbuf_lastindex++; - acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; - iop_data++; - iop_len--; - } - arcmsr_iop_message_read(acb); - } - memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len); - pcmdmessagefld->cmdmessage.Length = allxfer_len; - if(acb->fw_flag == FW_DEADLOCK) { - pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; - }else{ - pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; + if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) + acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } + spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); kfree(ver_addr); - } + pcmdmessagefld->cmdmessage.Length = allxfer_len; + if (acb->fw_flag == FW_DEADLOCK) + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_OK; break; - + } case ARCMSR_MESSAGE_WRITE_WQBUFFER: { unsigned char *ver_addr; int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; uint8_t *pQbuffer, *ptmpuserbuffer; - ver_addr = kmalloc(1032, GFP_ATOMIC); if (!ver_addr) { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } - if(acb->fw_flag == FW_DEADLOCK) { - pcmdmessagefld->cmdmessage.ReturnCode = - ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; - }else{ - pcmdmessagefld->cmdmessage.ReturnCode = - ARCMSR_MESSAGE_RETURNCODE_OK; - } ptmpuserbuffer = ver_addr; user_len = pcmdmessagefld->cmdmessage.Length; - memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); + memcpy(ptmpuserbuffer, + pcmdmessagefld->messagedatabuffer, user_len); + spin_lock_irqsave(&acb->wqbuffer_lock, flags); wqbuf_lastindex = acb->wqbuf_lastindex; wqbuf_firstindex = acb->wqbuf_firstindex; if (wqbuf_lastindex != wqbuf_firstindex) { struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)cmd->sense_buffer; - arcmsr_post_ioctldata2iop(acb); + arcmsr_write_ioctldata2iop(acb); /* has error report sensedata */ - sensebuffer->ErrorCode = 0x70; + sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS; sensebuffer->SenseKey = ILLEGAL_REQUEST; sensebuffer->AdditionalSenseLength = 0x0A; sensebuffer->AdditionalSenseCode = 0x20; sensebuffer->Valid = 1; retvalue = ARCMSR_MESSAGE_FAIL; } else { - my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) - &(ARCMSR_MAX_QBUFFER - 1); + my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1) + & (ARCMSR_MAX_QBUFFER - 1); if (my_empty_len >= user_len) { while (user_len > 0) { - pQbuffer = - &acb->wqbuffer[acb->wqbuf_lastindex]; - memcpy(pQbuffer, ptmpuserbuffer, 1); - acb->wqbuf_lastindex++; - acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; - ptmpuserbuffer++; - user_len--; + pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; + if ((acb->wqbuf_lastindex + user_len) + > ARCMSR_MAX_QBUFFER) { + memcpy(pQbuffer, ptmpuserbuffer, + ARCMSR_MAX_QBUFFER - + acb->wqbuf_lastindex); + ptmpuserbuffer += + (ARCMSR_MAX_QBUFFER + - acb->wqbuf_lastindex); + user_len -= (ARCMSR_MAX_QBUFFER + - acb->wqbuf_lastindex); + acb->wqbuf_lastindex = 0; + } else { + memcpy(pQbuffer, ptmpuserbuffer, + user_len); + acb->wqbuf_lastindex += user_len; + acb->wqbuf_lastindex %= + ARCMSR_MAX_QBUFFER; + user_len = 0; + } } - if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { + if (acb->acb_flags & + ACB_F_MESSAGE_WQBUFFER_CLEARED) { acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED; - arcmsr_post_ioctldata2iop(acb); + arcmsr_write_ioctldata2iop(acb); 
} } else { - /* has error report sensedata */ struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)cmd->sense_buffer; - sensebuffer->ErrorCode = 0x70; + /* has error report sensedata */ + sensebuffer->ErrorCode = + SCSI_SENSE_CURRENT_ERRORS; sensebuffer->SenseKey = ILLEGAL_REQUEST; sensebuffer->AdditionalSenseLength = 0x0A; sensebuffer->AdditionalSenseCode = 0x20; sensebuffer->Valid = 1; retvalue = ARCMSR_MESSAGE_FAIL; } - } - kfree(ver_addr); } + spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); + kfree(ver_addr); + if (acb->fw_flag == FW_DEADLOCK) + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_OK; break; - + } case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { uint8_t *pQbuffer = acb->rqbuffer; - if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { - acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; - arcmsr_iop_message_read(acb); - } + + arcmsr_clear_iop2drv_rqueue_buffer(acb); + spin_lock_irqsave(&acb->rqbuffer_lock, flags); acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); - if(acb->fw_flag == FW_DEADLOCK) { + spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); + if (acb->fw_flag == FW_DEADLOCK) pcmdmessagefld->cmdmessage.ReturnCode = - ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; - }else{ + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else pcmdmessagefld->cmdmessage.ReturnCode = - ARCMSR_MESSAGE_RETURNCODE_OK; - } - } + ARCMSR_MESSAGE_RETURNCODE_OK; break; - + } case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { uint8_t *pQbuffer = acb->wqbuffer; - if(acb->fw_flag == FW_DEADLOCK) { - pcmdmessagefld->cmdmessage.ReturnCode = - ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; - }else{ - pcmdmessagefld->cmdmessage.ReturnCode = - ARCMSR_MESSAGE_RETURNCODE_OK; - } - - if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { - acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; - arcmsr_iop_message_read(acb); - } - acb->acb_flags |= - (ACB_F_MESSAGE_WQBUFFER_CLEARED | - ACB_F_MESSAGE_WQBUFFER_READED); + spin_lock_irqsave(&acb->wqbuffer_lock, flags); + acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | + ACB_F_MESSAGE_WQBUFFER_READED); acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); - } + spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); + if (acb->fw_flag == FW_DEADLOCK) + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_OK; break; - + } case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { uint8_t *pQbuffer; - - if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { - acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; - arcmsr_iop_message_read(acb); - } - acb->acb_flags |= - (ACB_F_MESSAGE_WQBUFFER_CLEARED - | ACB_F_MESSAGE_RQBUFFER_CLEARED - | ACB_F_MESSAGE_WQBUFFER_READED); + arcmsr_clear_iop2drv_rqueue_buffer(acb); + spin_lock_irqsave(&acb->rqbuffer_lock, flags); + acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; - acb->wqbuf_firstindex = 0; - acb->wqbuf_lastindex = 0; pQbuffer = acb->rqbuffer; memset(pQbuffer, 0, sizeof(struct QBUFFER)); + spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); + spin_lock_irqsave(&acb->wqbuffer_lock, flags); + acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | + ACB_F_MESSAGE_WQBUFFER_READED); + acb->wqbuf_firstindex = 0; + acb->wqbuf_lastindex = 0; pQbuffer = acb->wqbuffer; memset(pQbuffer, 0, sizeof(struct QBUFFER)); - if(acb->fw_flag == 
FW_DEADLOCK) { + spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); + if (acb->fw_flag == FW_DEADLOCK) pcmdmessagefld->cmdmessage.ReturnCode = - ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; - }else{ + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else pcmdmessagefld->cmdmessage.ReturnCode = - ARCMSR_MESSAGE_RETURNCODE_OK; - } - } + ARCMSR_MESSAGE_RETURNCODE_OK; break; - + } case ARCMSR_MESSAGE_RETURN_CODE_3F: { - if(acb->fw_flag == FW_DEADLOCK) { + if (acb->fw_flag == FW_DEADLOCK) pcmdmessagefld->cmdmessage.ReturnCode = - ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; - }else{ + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else pcmdmessagefld->cmdmessage.ReturnCode = - ARCMSR_MESSAGE_RETURNCODE_3F; - } + ARCMSR_MESSAGE_RETURNCODE_3F; break; - } + } case ARCMSR_MESSAGE_SAY_HELLO: { int8_t *hello_string = "Hello! I am ARCMSR"; - if(acb->fw_flag == FW_DEADLOCK) { + if (acb->fw_flag == FW_DEADLOCK) pcmdmessagefld->cmdmessage.ReturnCode = - ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; - }else{ + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else pcmdmessagefld->cmdmessage.ReturnCode = - ARCMSR_MESSAGE_RETURNCODE_OK; - } - memcpy(pcmdmessagefld->messagedatabuffer, hello_string - , (int16_t)strlen(hello_string)); - } + ARCMSR_MESSAGE_RETURNCODE_OK; + memcpy(pcmdmessagefld->messagedatabuffer, + hello_string, (int16_t)strlen(hello_string)); break; - - case ARCMSR_MESSAGE_SAY_GOODBYE: - if(acb->fw_flag == FW_DEADLOCK) { + } + case ARCMSR_MESSAGE_SAY_GOODBYE: { + if (acb->fw_flag == FW_DEADLOCK) pcmdmessagefld->cmdmessage.ReturnCode = - ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; - } + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_OK; arcmsr_iop_parking(acb); break; - - case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: - if(acb->fw_flag == FW_DEADLOCK) { + } + case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: { + if (acb->fw_flag == FW_DEADLOCK) pcmdmessagefld->cmdmessage.ReturnCode = - ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; - } + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_OK; arcmsr_flush_adapter_cache(acb); break; - + } default: retvalue = ARCMSR_MESSAGE_FAIL; + pr_info("%s: unknown controlcode!\n", __func__); + } +message_out: + if (use_sg) { + struct scatterlist *sg = scsi_sglist(cmd); + kunmap_atomic(buffer - sg->offset); } - message_out: - sg = scsi_sglist(cmd); - kunmap_atomic(buffer - sg->offset); return retvalue; } -- cgit From c10b1d544aaaf98ab1792845c106471ee1ee0c05 Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 15:20:31 +0800 Subject: arcmsr: fix sparse warnings and errors Fix sparse utility checking errors and warnings. 
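The changes are mostly of the following form (illustrative before/after pairs mirroring the hunks below): keep the __iomem address-space annotation instead of casting it away, mark file-local symbols static, and stop using readl()/writel() on the type B queue buffers, which live in coherent DMA memory rather than MMIO space.

/* before: sparse flags the cast for mixing __iomem and plain pointers */
struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
/* after: keep the annotation carried by the union member */
struct MessageUnit_C __iomem *reg = pACB->pmuC;

/* before: sparse asks whether this file-local symbol should be static */
wait_queue_head_t wait_q;
/* after */
static wait_queue_head_t wait_q;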
Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr_hba.c | 78 ++++++++++++++++++++-------------------- 1 file changed, 40 insertions(+), 38 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 1576805efc7d..34a43edcf078 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -78,7 +78,7 @@ MODULE_VERSION(ARCMSR_DRIVER_VERSION); #define ARCMSR_SLEEPTIME 10 #define ARCMSR_RETRYCOUNT 12 -wait_queue_head_t wait_q; +static wait_queue_head_t wait_q; static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd); static int arcmsr_iop_confirm(struct AdapterControlBlock *acb); @@ -353,7 +353,7 @@ static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb) static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB) { - struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC; + struct MessageUnit_C __iomem *phbcmu = pACB->pmuC; int i; for (i = 0; i < 2000; i++) { @@ -403,7 +403,7 @@ static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb) static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB) { - struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC; + struct MessageUnit_C __iomem *reg = pACB->pmuC; int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */ writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); @@ -827,7 +827,7 @@ static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb) } static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB) { - struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC; + struct MessageUnit_C __iomem *reg = pACB->pmuC; writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { @@ -915,7 +915,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) } break; case ACB_ADAPTER_TYPE_C:{ - struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; + struct MessageUnit_C __iomem *reg = acb->pmuC; /* disable all outbound interrupt */ orig_mask = readl(®->host_int_mask); /* disable outbound message0 int */ writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, ®->host_int_mask); @@ -1039,8 +1039,9 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) /*clear all outbound posted Q*/ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */ for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { - if ((flag_ccb = readl(®->done_qbuffer[i])) != 0) { - writel(0, ®->done_qbuffer[i]); + flag_ccb = reg->done_qbuffer[i]; + if (flag_ccb != 0) { + reg->done_qbuffer[i] = 0; pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/ pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? 
true : false; @@ -1053,7 +1054,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) } break; case ACB_ADAPTER_TYPE_C: { - struct MessageUnit_C *reg = acb->pmuC; + struct MessageUnit_C __iomem *reg = acb->pmuC; struct ARCMSR_CDB *pARCMSR_CDB; uint32_t flag_ccb, ccb_cdb_phy; bool error; @@ -1171,7 +1172,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, } break; case ACB_ADAPTER_TYPE_C: { - struct MessageUnit_C *reg = acb->pmuC; + struct MessageUnit_C __iomem *reg = acb->pmuC; mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK); writel(intmask_org & mask, ®->host_int_mask); acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f; @@ -1257,12 +1258,12 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr uint32_t ending_index, index = reg->postq_index; ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE); - writel(0, ®->post_qbuffer[ending_index]); + reg->post_qbuffer[ending_index] = 0; if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { - writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE, - ®->post_qbuffer[index]); + reg->post_qbuffer[index] = + cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE; } else { - writel(cdb_phyaddr, ®->post_qbuffer[index]); + reg->post_qbuffer[index] = cdb_phyaddr; } index++; index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */ @@ -1271,7 +1272,7 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr } break; case ACB_ADAPTER_TYPE_C: { - struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC; + struct MessageUnit_C __iomem *phbcmu = acb->pmuC; uint32_t ccb_post_stamp, arc_cdb_size; arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 
0x300 : ccb->arc_cdb_size; @@ -1313,7 +1314,7 @@ static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb) static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB) { - struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC; + struct MessageUnit_C __iomem *reg = pACB->pmuC; pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); @@ -1347,7 +1348,7 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle); } -void arcmsr_iop_message_read(struct AdapterControlBlock *acb) +static void arcmsr_iop_message_read(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { @@ -1419,7 +1420,7 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb) } break; case ACB_ADAPTER_TYPE_C: { - struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC; + struct MessageUnit_C __iomem *phbcmu = acb->pmuC; qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer; } } @@ -1443,7 +1444,7 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc } break; case ACB_ADAPTER_TYPE_C: { - struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; + struct MessageUnit_C __iomem *reg = acb->pmuC; pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer; } @@ -1640,7 +1641,7 @@ static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb) static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB) { uint32_t outbound_doorbell; - struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC; + struct MessageUnit_C __iomem *reg = pACB->pmuC; /* ******************************************************************* ** Maybe here we need to check wrqbuffer_lock is lock or not @@ -1686,8 +1687,8 @@ static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb) struct CommandControlBlock *pCCB; bool error; index = reg->doneq_index; - while ((flag_ccb = readl(®->done_qbuffer[index])) != 0) { - writel(0, ®->done_qbuffer[index]); + while ((flag_ccb = reg->done_qbuffer[index]) != 0) { + reg->done_qbuffer[index] = 0; pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/ pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? 
true : false; @@ -1700,13 +1701,13 @@ static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb) static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb) { - struct MessageUnit_C *phbcmu; + struct MessageUnit_C __iomem *phbcmu; struct ARCMSR_CDB *arcmsr_cdb; struct CommandControlBlock *ccb; uint32_t flag_ccb, ccb_cdb_phy, throttling = 0; int error; - phbcmu = (struct MessageUnit_C *)acb->pmuC; + phbcmu = acb->pmuC; /* areca cdb command done */ /* Use correct offset and size for syncing */ @@ -1739,7 +1740,7 @@ static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb) */ static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb) { - struct MessageUnit_A *reg = acb->pmuA; + struct MessageUnit_A __iomem *reg = acb->pmuA; /*clear interrupt and message state*/ writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, ®->outbound_intstatus); schedule_work(&acb->arcmsr_do_message_isr_bh); @@ -1763,7 +1764,7 @@ static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb) */ static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb) { - struct MessageUnit_C *reg = acb->pmuC; + struct MessageUnit_C __iomem *reg = acb->pmuC; /*clear interrupt and message state*/ writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, ®->outbound_doorbell_clear); schedule_work(&acb->arcmsr_do_message_isr_bh); @@ -1824,7 +1825,7 @@ static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb) static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB) { uint32_t host_interrupt_status; - struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC; + struct MessageUnit_C __iomem *phbcmu = pACB->pmuC; /* ********************************************* ** check outbound intstatus @@ -2230,7 +2231,7 @@ static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock list_del_init(&ccb->list); }else{ spin_unlock_irqrestore(&acb->ccblist_lock, flags); - return 0; + return NULL; } spin_unlock_irqrestore(&acb->ccblist_lock, flags); return ccb; @@ -2468,11 +2469,11 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb) static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB) { uint32_t intmask_org, Index, firmware_state = 0; - struct MessageUnit_C *reg = pACB->pmuC; + struct MessageUnit_C __iomem *reg = pACB->pmuC; char *acb_firm_model = pACB->firm_model; char *acb_firm_version = pACB->firm_version; - char *iop_firm_model = (char *)(®->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/ - char *iop_firm_version = (char *)(®->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/ + char __iomem *iop_firm_model = (char __iomem *)(®->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/ + char __iomem *iop_firm_version = (char __iomem *)(®->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/ int count; /* disable all outbound interrupt */ intmask_org = readl(®->host_int_mask); /* disable outbound message0 int */ @@ -2620,7 +2621,8 @@ static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb, writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); while(1){ index = reg->doneq_index; - if ((flag_ccb = readl(®->done_qbuffer[index])) == 0) { + flag_ccb = reg->done_qbuffer[index]; + if (flag_ccb == 0) { if (poll_ccb_done){ rtn = SUCCESS; break; @@ -2633,7 +2635,7 @@ static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb, goto polling_hbb_ccb_retry; } } - writel(0, ®->done_qbuffer[index]); + reg->done_qbuffer[index] = 0; index++; /*if last index number set it to 0 */ index %= ARCMSR_MAX_HBB_POSTQUEUE; @@ 
-2671,7 +2673,7 @@ static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb, static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb) { - struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; + struct MessageUnit_C __iomem *reg = acb->pmuC; uint32_t flag_ccb, ccb_cdb_phy; struct ARCMSR_CDB *arcmsr_cdb; bool error; @@ -2834,7 +2836,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) break; case ACB_ADAPTER_TYPE_C: { if (cdb_phyaddr_hi32 != 0) { - struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; + struct MessageUnit_C __iomem *reg = acb->pmuC; printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n", acb->adapter_index, cdb_phyaddr_hi32); @@ -2875,7 +2877,7 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) } break; case ACB_ADAPTER_TYPE_C: { - struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; + struct MessageUnit_C __iomem *reg = acb->pmuC; do { firmware_state = readl(®->outbound_msgaddr1); } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0); @@ -2907,7 +2909,7 @@ static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb) static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb) { - struct MessageUnit_B __iomem *reg = acb->pmuB; + struct MessageUnit_B *reg = acb->pmuB; if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; @@ -2992,7 +2994,7 @@ static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb) static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB) { - struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC; + struct MessageUnit_C __iomem *phbcmu = pACB->pmuC; pACB->acb_flags |= ACB_F_MSG_START_BGRB; writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell); @@ -3039,7 +3041,7 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb) } break; case ACB_ADAPTER_TYPE_C: { - struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; + struct MessageUnit_C __iomem *reg = acb->pmuC; uint32_t outbound_doorbell, i; /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = readl(®->outbound_doorbell); -- cgit From aaa64f69480bcde4e203584dfc77d7e4ffb737d6 Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 15:22:45 +0800 Subject: arcmsr: modify some character strings Revise comment and some character strings. Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr_hba.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 34a43edcf078..26bcdc0fa8bf 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -2,11 +2,10 @@ ******************************************************************************* ** O.S : Linux ** FILE NAME : arcmsr_hba.c -** BY : Nick Cheng -** Description: SCSI RAID Device Driver for -** ARECA RAID Host adapter +** BY : Nick Cheng, C.L. 
Huang +** Description: SCSI RAID Device Driver for Areca RAID Controller ******************************************************************************* -** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved +** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved ** ** Web site: www.areca.com.tw ** E-mail: support@areca.com.tw @@ -70,8 +69,8 @@ #include #include #include "arcmsr.h" -MODULE_AUTHOR("Nick Cheng "); -MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapter"); +MODULE_AUTHOR("Nick Cheng, C.L. Huang "); +MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(ARCMSR_DRIVER_VERSION); @@ -126,8 +125,7 @@ static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, static struct scsi_host_template arcmsr_scsi_host_template = { .module = THIS_MODULE, - .name = "ARCMSR ARECA SATA/SAS RAID Controller" - ARCMSR_DRIVER_VERSION, + .name = "Areca SAS/SATA RAID driver", .info = arcmsr_info, .queuecommand = arcmsr_queue_command, .eh_abort_handler = arcmsr_abort, @@ -3387,14 +3385,14 @@ static const char *arcmsr_info(struct Scsi_Host *host) case PCI_DEVICE_ID_ARECA_1680: case PCI_DEVICE_ID_ARECA_1681: case PCI_DEVICE_ID_ARECA_1880: - type = "SAS"; + type = "SAS/SATA"; break; default: - type = "X-TYPE"; + type = "unknown"; + raid6 = 0; break; } - sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s", - type, raid6 ? "( RAID6 capable)" : "", - ARCMSR_DRIVER_VERSION); + sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n", + type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION); return buf; } -- cgit From 5b37479adee7164b17b6e2030b9a30d04583eb61 Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 15:25:22 +0800 Subject: arcmsr: add support new adapter ARC12x4 series Add code to support the new Areca Raid ARC12x4 series adapters. Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr.h | 108 ++++++ drivers/scsi/arcmsr/arcmsr_hba.c | 761 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 860 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index 799393ede1cc..d1c78efc202e 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -63,12 +63,17 @@ struct device_attribute; #define ARCMSR_MAX_QBUFFER 4096 #define ARCMSR_DEFAULT_SG_ENTRIES 38 #define ARCMSR_MAX_HBB_POSTQUEUE 264 +#define ARCMSR_MAX_ARC1214_POSTQUEUE 256 +#define ARCMSR_MAX_ARC1214_DONEQUEUE 257 #define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */ #define ARCMSR_CDB_SG_PAGE_LENGTH 256 #define ARCMST_NUM_MSIX_VECTORS 4 #ifndef PCI_DEVICE_ID_ARECA_1880 #define PCI_DEVICE_ID_ARECA_1880 0x1880 #endif +#ifndef PCI_DEVICE_ID_ARECA_1214 + #define PCI_DEVICE_ID_ARECA_1214 0x1214 +#endif /* ********************************************************************************** ** @@ -339,6 +344,56 @@ struct FIRMWARE_INFO #define ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK 0x80000000 /* ******************************************************************************* +** SPEC. 
for Areca Type D adapter +******************************************************************************* +*/ +#define ARCMSR_ARC1214_CHIP_ID 0x00004 +#define ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION 0x00008 +#define ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK 0x00034 +#define ARCMSR_ARC1214_SAMPLE_RESET 0x00100 +#define ARCMSR_ARC1214_RESET_REQUEST 0x00108 +#define ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS 0x00200 +#define ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE 0x0020C +#define ARCMSR_ARC1214_INBOUND_MESSAGE0 0x00400 +#define ARCMSR_ARC1214_INBOUND_MESSAGE1 0x00404 +#define ARCMSR_ARC1214_OUTBOUND_MESSAGE0 0x00420 +#define ARCMSR_ARC1214_OUTBOUND_MESSAGE1 0x00424 +#define ARCMSR_ARC1214_INBOUND_DOORBELL 0x00460 +#define ARCMSR_ARC1214_OUTBOUND_DOORBELL 0x00480 +#define ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE 0x00484 +#define ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW 0x01000 +#define ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH 0x01004 +#define ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER 0x01018 +#define ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW 0x01060 +#define ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH 0x01064 +#define ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER 0x0106C +#define ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER 0x01070 +#define ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE 0x01088 +#define ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE 0x0108C +#define ARCMSR_ARC1214_MESSAGE_WBUFFER 0x02000 +#define ARCMSR_ARC1214_MESSAGE_RBUFFER 0x02100 +#define ARCMSR_ARC1214_MESSAGE_RWBUFFER 0x02200 +/* Host Interrupt Mask */ +#define ARCMSR_ARC1214_ALL_INT_ENABLE 0x00001010 +#define ARCMSR_ARC1214_ALL_INT_DISABLE 0x00000000 +/* Host Interrupt Status */ +#define ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR 0x00001000 +#define ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR 0x00000010 +/* DoorBell*/ +#define ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY 0x00000001 +#define ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ 0x00000002 +/*inbound message 0 ready*/ +#define ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK 0x00000001 +/*outbound DATA WRITE isr door bell clear*/ +#define ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK 0x00000002 +/*outbound message 0 ready*/ +#define ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE 0x02000000 +/*outbound message cmd isr door bell clear*/ +/*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/ +#define ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK 0x80000000 +#define ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR 0x00000001 +/* +******************************************************************************* ** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504) ******************************************************************************* */ @@ -496,6 +551,56 @@ struct MessageUnit_C{ uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/ }; /* +********************************************************************* +** Messaging Unit (MU) of Type D processor +********************************************************************* +*/ +struct InBound_SRB { + uint32_t addressLow; /* pointer to SRB block */ + uint32_t addressHigh; + uint32_t length; /* in DWORDs */ + uint32_t reserved0; +}; + +struct OutBound_SRB { + uint32_t addressLow; /* pointer to SRB block */ + uint32_t addressHigh; +}; + +struct MessageUnit_D { + struct InBound_SRB post_qbuffer[ARCMSR_MAX_ARC1214_POSTQUEUE]; + volatile struct OutBound_SRB + done_qbuffer[ARCMSR_MAX_ARC1214_DONEQUEUE]; + u16 postq_index; + volatile u16 doneq_index; + u32 __iomem *chip_id; /* 0x00004 */ + u32 __iomem *cpu_mem_config; /* 0x00008 */ + u32 __iomem *i2o_host_interrupt_mask; /* 0x00034 */ + u32 __iomem *sample_at_reset; /* 0x00100 */ + u32 __iomem *reset_request; 
/* 0x00108 */ + u32 __iomem *host_int_status; /* 0x00200 */ + u32 __iomem *pcief0_int_enable; /* 0x0020C */ + u32 __iomem *inbound_msgaddr0; /* 0x00400 */ + u32 __iomem *inbound_msgaddr1; /* 0x00404 */ + u32 __iomem *outbound_msgaddr0; /* 0x00420 */ + u32 __iomem *outbound_msgaddr1; /* 0x00424 */ + u32 __iomem *inbound_doorbell; /* 0x00460 */ + u32 __iomem *outbound_doorbell; /* 0x00480 */ + u32 __iomem *outbound_doorbell_enable; /* 0x00484 */ + u32 __iomem *inboundlist_base_low; /* 0x01000 */ + u32 __iomem *inboundlist_base_high; /* 0x01004 */ + u32 __iomem *inboundlist_write_pointer; /* 0x01018 */ + u32 __iomem *outboundlist_base_low; /* 0x01060 */ + u32 __iomem *outboundlist_base_high; /* 0x01064 */ + u32 __iomem *outboundlist_copy_pointer; /* 0x0106C */ + u32 __iomem *outboundlist_read_pointer; /* 0x01070 0x01072 */ + u32 __iomem *outboundlist_interrupt_cause; /* 0x1088 */ + u32 __iomem *outboundlist_interrupt_enable; /* 0x108C */ + u32 __iomem *message_wbuffer; /* 0x2000 */ + u32 __iomem *message_rbuffer; /* 0x2100 */ + u32 __iomem *msgcode_rwbuffer; /* 0x2200 */ +}; +/* ******************************************************************************* ** Adapter Control Block ******************************************************************************* @@ -518,12 +623,15 @@ struct AdapterControlBlock uint32_t reg_mu_acc_handle0; spinlock_t eh_lock; spinlock_t ccblist_lock; + spinlock_t postq_lock; + spinlock_t doneq_lock; spinlock_t rqbuffer_lock; spinlock_t wqbuffer_lock; union { struct MessageUnit_A __iomem *pmuA; struct MessageUnit_B *pmuB; struct MessageUnit_C __iomem *pmuC; + struct MessageUnit_D *pmuD; }; /* message unit ATU inbound base address0 */ void __iomem *mem_base0; diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 26bcdc0fa8bf..b3cb969c72d5 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -108,6 +108,7 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work); static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb); static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB); +static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb); static void arcmsr_hardware_reset(struct AdapterControlBlock *acb); static const char *arcmsr_info(struct Scsi_Host *); static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); @@ -161,6 +162,8 @@ static struct pci_device_id arcmsr_device_id_table[] = { .driver_data = ACB_ADAPTER_TYPE_B}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210), .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214), + .driver_data = ACB_ADAPTER_TYPE_D}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230), @@ -202,7 +205,8 @@ static struct pci_driver arcmsr_pci_driver = { static void arcmsr_free_mu(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { - case ACB_ADAPTER_TYPE_B:{ + case ACB_ADAPTER_TYPE_B: + case ACB_ADAPTER_TYPE_D: { dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize, acb->dma_coherent2, acb->dma_coherent_handle2); break; @@ -251,6 +255,25 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb) } break; } + case ACB_ADAPTER_TYPE_D: { + void __iomem *mem_base0; + unsigned long addr, range, flags; + + addr = (unsigned long)pci_resource_start(pdev, 0); + range = 
pci_resource_len(pdev, 0); + flags = pci_resource_flags(pdev, 0); + if (flags & IORESOURCE_CACHEABLE) + mem_base0 = ioremap(addr, range); + else + mem_base0 = ioremap_nocache(addr, range); + if (!mem_base0) { + pr_notice("arcmsr%d: memory mapping region fail\n", + acb->host->host_no); + return false; + } + acb->mem_base0 = mem_base0; + break; + } } return true; } @@ -271,6 +294,10 @@ static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_C:{ iounmap(acb->pmuC); } + break; + case ACB_ADAPTER_TYPE_D: + iounmap(acb->mem_base0); + break; } } @@ -367,6 +394,23 @@ static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB) return false; } +static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_D *reg = pACB->pmuD; + int i; + + for (i = 0; i < 2000; i++) { + if (readl(reg->outbound_doorbell) + & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) { + writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, + reg->outbound_doorbell); + return true; + } + msleep(10); + } /* max 20 seconds */ + return false; +} + static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; @@ -416,6 +460,24 @@ static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB) } while (retry_count != 0); return; } + +static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB) +{ + int retry_count = 15; + struct MessageUnit_D *reg = pACB->pmuD; + + writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0); + do { + if (arcmsr_hbaD_wait_msgint_ready(pACB)) + break; + + retry_count--; + pr_notice("arcmsr%d: wait 'flush adapter " + "cache' timeout, retry count down = %d\n", + pACB->host->host_no, retry_count); + } while (retry_count != 0); +} + static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { @@ -432,6 +494,10 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_C: { arcmsr_hbaC_flush_cache(acb); } + break; + case ACB_ADAPTER_TYPE_D: + arcmsr_hbaD_flush_cache(acb); + break; } } @@ -475,9 +541,16 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle; for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){ cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb); - ccb_tmp->cdb_phyaddr = - ((acb->adapter_type == ACB_ADAPTER_TYPE_C) ? 
- cdb_phyaddr : (cdb_phyaddr >> 5)); + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: + case ACB_ADAPTER_TYPE_B: + ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5; + break; + case ACB_ADAPTER_TYPE_C: + case ACB_ADAPTER_TYPE_D: + ccb_tmp->cdb_phyaddr = cdb_phyaddr; + break; + } acb->pccb_pool[i] = ccb_tmp; ccb_tmp->acb = acb; INIT_LIST_HEAD(&ccb_tmp->list); @@ -521,6 +594,13 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work) devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]); break; } + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + + signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]); + devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]); + break; + } } atomic_inc(&acb->rq_map_token); if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG) @@ -651,6 +731,8 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) } spin_lock_init(&acb->eh_lock); spin_lock_init(&acb->ccblist_lock); + spin_lock_init(&acb->postq_lock); + spin_lock_init(&acb->doneq_lock); spin_lock_init(&acb->rqbuffer_lock); spin_lock_init(&acb->wqbuffer_lock); acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | @@ -836,6 +918,20 @@ static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB) } return true; } + +static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_D *reg = pACB->pmuD; + + writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0); + if (!arcmsr_hbaD_wait_msgint_ready(pACB)) { + pr_notice("arcmsr%d: wait 'abort all outstanding " + "command' timeout\n", pACB->host->host_no); + return false; + } + return true; +} + static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb) { uint8_t rtnval = 0; @@ -853,6 +949,11 @@ static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_C: { rtnval = arcmsr_hbaC_abort_allcmd(acb); } + break; + + case ACB_ADAPTER_TYPE_D: + rtnval = arcmsr_hbaD_abort_allcmd(acb); + break; } return rtnval; } @@ -919,6 +1020,12 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, ®->host_int_mask); } break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + /* disable all outbound interrupt */ + writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable); + } + break; } return orig_mask; } @@ -1066,7 +1173,62 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; arcmsr_drain_donequeue(acb, pCCB, error); } - } + } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *pmu = acb->pmuD; + uint32_t ccb_cdb_phy, outbound_write_pointer; + uint32_t doneq_index, index_stripped, addressLow, residual; + bool error; + struct CommandControlBlock *pCCB; + + outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1; + doneq_index = pmu->doneq_index; + residual = atomic_read(&acb->ccboutstandingcount); + for (i = 0; i < residual; i++) { + while ((doneq_index & 0xFFF) != + (outbound_write_pointer & 0xFFF)) { + if (doneq_index & 0x4000) { + index_stripped = doneq_index & 0xFFF; + index_stripped += 1; + index_stripped %= + ARCMSR_MAX_ARC1214_DONEQUEUE; + pmu->doneq_index = index_stripped ? + (index_stripped | 0x4000) : + (index_stripped + 1); + } else { + index_stripped = doneq_index; + index_stripped += 1; + index_stripped %= + ARCMSR_MAX_ARC1214_DONEQUEUE; + pmu->doneq_index = index_stripped ? 
+ index_stripped : + ((index_stripped | 0x4000) + 1); + } + doneq_index = pmu->doneq_index; + addressLow = pmu->done_qbuffer[doneq_index & + 0xFFF].addressLow; + ccb_cdb_phy = (addressLow & 0xFFFFFFF0); + pARCMSR_CDB = (struct ARCMSR_CDB *) + (acb->vir2phy_offset + ccb_cdb_phy); + pCCB = container_of(pARCMSR_CDB, + struct CommandControlBlock, arcmsr_cdb); + error = (addressLow & + ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? + true : false; + arcmsr_drain_donequeue(acb, pCCB, error); + writel(doneq_index, + pmu->outboundlist_read_pointer); + } + mdelay(10); + outbound_write_pointer = + pmu->done_qbuffer[0].addressLow + 1; + doneq_index = pmu->doneq_index; + } + pmu->postq_index = 0; + pmu->doneq_index = 0x40FF; + } + break; } } @@ -1175,6 +1337,14 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, writel(intmask_org & mask, ®->host_int_mask); acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f; } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + + mask = ARCMSR_ARC1214_ALL_INT_ENABLE; + writel(intmask_org | mask, reg->pcief0_int_enable); + break; + } } } @@ -1282,6 +1452,38 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr writel(ccb_post_stamp, &phbcmu->inbound_queueport_low); } } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *pmu = acb->pmuD; + u16 index_stripped; + u16 postq_index; + unsigned long flags; + struct InBound_SRB *pinbound_srb; + + spin_lock_irqsave(&acb->postq_lock, flags); + postq_index = pmu->postq_index; + pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]); + pinbound_srb->addressHigh = dma_addr_hi32(cdb_phyaddr); + pinbound_srb->addressLow = dma_addr_lo32(cdb_phyaddr); + pinbound_srb->length = ccb->arc_cdb_size >> 2; + arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr); + if (postq_index & 0x4000) { + index_stripped = postq_index & 0xFF; + index_stripped += 1; + index_stripped %= ARCMSR_MAX_ARC1214_POSTQUEUE; + pmu->postq_index = index_stripped ? + (index_stripped | 0x4000) : index_stripped; + } else { + index_stripped = postq_index; + index_stripped += 1; + index_stripped %= ARCMSR_MAX_ARC1214_POSTQUEUE; + pmu->postq_index = index_stripped ? 
index_stripped : + (index_stripped | 0x4000); + } + writel(postq_index, pmu->inboundlist_write_pointer); + spin_unlock_irqrestore(&acb->postq_lock, flags); + break; + } } } @@ -1323,6 +1525,18 @@ static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB) } return; } + +static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_D *reg = pACB->pmuD; + + pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; + writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0); + if (!arcmsr_hbaD_wait_msgint_ready(pACB)) + pr_notice("arcmsr%d: wait 'stop adapter background rebulid' " + "timeout\n", pACB->host->host_no); +} + static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { @@ -1338,6 +1552,10 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_C: { arcmsr_hbaC_stop_bgrb(acb); } + break; + case ACB_ADAPTER_TYPE_D: + arcmsr_hbaD_stop_bgrb(acb); + break; } } @@ -1362,8 +1580,16 @@ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb) break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C __iomem *reg = acb->pmuC; + writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell); } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ, + reg->inbound_doorbell); + } + break; } } @@ -1398,6 +1624,12 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, ®->inbound_doorbell); } break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY, + reg->inbound_doorbell); + } + break; } } @@ -1421,6 +1653,12 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb) struct MessageUnit_C __iomem *phbcmu = acb->pmuC; qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer; } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer; + } + break; } return qbuffer; } @@ -1444,8 +1682,13 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C __iomem *reg = acb->pmuC; pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer; - } - + } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer; + } + break; } return pqbuffer; } @@ -1500,7 +1743,7 @@ arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, uint8_t __iomem *iop_data; uint32_t iop_len; - if (acb->adapter_type & ACB_ADAPTER_TYPE_C) + if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer); iop_data = (uint8_t __iomem *)prbuffer->data; iop_len = readl(&prbuffer->data_len); @@ -1586,7 +1829,7 @@ arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb) uint8_t __iomem *iop_data; int32_t allxfer_len = 0; - if (acb->adapter_type & ACB_ADAPTER_TYPE_C) { + if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) { arcmsr_write_ioctldata2iop_in_DWORD(acb); return; } @@ -1662,6 +1905,27 @@ static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB) | ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK | ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)); } + +static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB) +{ + uint32_t outbound_doorbell; + struct MessageUnit_D *pmu = pACB->pmuD; + + 
outbound_doorbell = readl(pmu->outbound_doorbell); + do { + writel(outbound_doorbell, pmu->outbound_doorbell); + if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) + arcmsr_hbaD_message_isr(pACB); + if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) + arcmsr_iop2drv_data_wrote_handle(pACB); + if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK) + arcmsr_iop2drv_data_read_handle(pACB); + outbound_doorbell = readl(pmu->outbound_doorbell); + } while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK + | ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK + | ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)); +} + static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb) { uint32_t flag_ccb; @@ -1728,6 +1992,59 @@ static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb) } } } + +static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb) +{ + u32 outbound_write_pointer, doneq_index, index_stripped; + uint32_t addressLow, ccb_cdb_phy; + int error; + struct MessageUnit_D *pmu; + struct ARCMSR_CDB *arcmsr_cdb; + struct CommandControlBlock *ccb; + unsigned long flags; + + spin_lock_irqsave(&acb->doneq_lock, flags); + pmu = acb->pmuD; + outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1; + doneq_index = pmu->doneq_index; + if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) { + do { + if (doneq_index & 0x4000) { + index_stripped = doneq_index & 0xFFF; + index_stripped += 1; + index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; + pmu->doneq_index = index_stripped + ? (index_stripped | 0x4000) : + (index_stripped + 1); + } else { + index_stripped = doneq_index; + index_stripped += 1; + index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; + pmu->doneq_index = index_stripped + ? index_stripped : + ((index_stripped | 0x4000) + 1); + } + doneq_index = pmu->doneq_index; + addressLow = pmu->done_qbuffer[doneq_index & + 0xFFF].addressLow; + ccb_cdb_phy = (addressLow & 0xFFFFFFF0); + arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + + ccb_cdb_phy); + ccb = container_of(arcmsr_cdb, + struct CommandControlBlock, arcmsr_cdb); + error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) + ? 
true : false; + arcmsr_drain_donequeue(acb, ccb, error); + writel(doneq_index, pmu->outboundlist_read_pointer); + } while ((doneq_index & 0xFFF) != + (outbound_write_pointer & 0xFFF)); + } + writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR, + pmu->outboundlist_interrupt_cause); + readl(pmu->outboundlist_interrupt_cause); + spin_unlock_irqrestore(&acb->doneq_lock, flags); +} + /* ********************************************************************************** ** Handle a message interrupt @@ -1768,6 +2085,15 @@ static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb) schedule_work(&acb->arcmsr_do_message_isr_bh); } +static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb) +{ + struct MessageUnit_D *reg = acb->pmuD; + + writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell); + readl(reg->outbound_doorbell); + schedule_work(&acb->arcmsr_do_message_isr_bh); +} + static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb) { uint32_t outbound_intstatus; @@ -1845,6 +2171,32 @@ static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB) ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)); return IRQ_HANDLED; } + +static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB) +{ + u32 host_interrupt_status; + struct MessageUnit_D *pmu = pACB->pmuD; + + host_interrupt_status = readl(pmu->host_int_status) & + (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR | + ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR); + if (!host_interrupt_status) + return IRQ_NONE; + do { + /* MU post queue interrupts*/ + if (host_interrupt_status & + ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR) + arcmsr_hbaD_postqueue_isr(pACB); + if (host_interrupt_status & + ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR) + arcmsr_hbaD_doorbell_isr(pACB); + host_interrupt_status = readl(pmu->host_int_status); + } while (host_interrupt_status & + (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR | + ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)); + return IRQ_HANDLED; +} + static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { @@ -1856,6 +2208,8 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb) break; case ACB_ADAPTER_TYPE_C: return arcmsr_hbaC_handle_isr(acb); + case ACB_ADAPTER_TYPE_D: + return arcmsr_hbaD_handle_isr(acb); default: return IRQ_NONE; } @@ -2522,6 +2876,137 @@ static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB) /*all interrupt service will be enable at arcmsr_iop_init*/ return true; } + +static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb) +{ + char *acb_firm_model = acb->firm_model; + char *acb_firm_version = acb->firm_version; + char *acb_device_map = acb->device_map; + char __iomem *iop_firm_model; + char __iomem *iop_firm_version; + char __iomem *iop_device_map; + u32 count; + struct MessageUnit_D *reg ; + void *dma_coherent2; + dma_addr_t dma_coherent_handle2; + struct pci_dev *pdev = acb->pdev; + + acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32); + dma_coherent2 = dma_alloc_coherent(&pdev->dev, acb->roundup_ccbsize, + &dma_coherent_handle2, GFP_KERNEL); + if (!dma_coherent2) { + pr_notice("DMA allocation failed...\n"); + return false; + } + memset(dma_coherent2, 0, acb->roundup_ccbsize); + acb->dma_coherent_handle2 = dma_coherent_handle2; + acb->dma_coherent2 = dma_coherent2; + reg = (struct MessageUnit_D *)dma_coherent2; + acb->pmuD = reg; + reg->chip_id = acb->mem_base0 + ARCMSR_ARC1214_CHIP_ID; + reg->cpu_mem_config = acb->mem_base0 + + ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION; + 
reg->i2o_host_interrupt_mask = acb->mem_base0 + + ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK; + reg->sample_at_reset = acb->mem_base0 + ARCMSR_ARC1214_SAMPLE_RESET; + reg->reset_request = acb->mem_base0 + ARCMSR_ARC1214_RESET_REQUEST; + reg->host_int_status = acb->mem_base0 + + ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS; + reg->pcief0_int_enable = acb->mem_base0 + + ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE; + reg->inbound_msgaddr0 = acb->mem_base0 + + ARCMSR_ARC1214_INBOUND_MESSAGE0; + reg->inbound_msgaddr1 = acb->mem_base0 + + ARCMSR_ARC1214_INBOUND_MESSAGE1; + reg->outbound_msgaddr0 = acb->mem_base0 + + ARCMSR_ARC1214_OUTBOUND_MESSAGE0; + reg->outbound_msgaddr1 = acb->mem_base0 + + ARCMSR_ARC1214_OUTBOUND_MESSAGE1; + reg->inbound_doorbell = acb->mem_base0 + + ARCMSR_ARC1214_INBOUND_DOORBELL; + reg->outbound_doorbell = acb->mem_base0 + + ARCMSR_ARC1214_OUTBOUND_DOORBELL; + reg->outbound_doorbell_enable = acb->mem_base0 + + ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE; + reg->inboundlist_base_low = acb->mem_base0 + + ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW; + reg->inboundlist_base_high = acb->mem_base0 + + ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH; + reg->inboundlist_write_pointer = acb->mem_base0 + + ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER; + reg->outboundlist_base_low = acb->mem_base0 + + ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW; + reg->outboundlist_base_high = acb->mem_base0 + + ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH; + reg->outboundlist_copy_pointer = acb->mem_base0 + + ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER; + reg->outboundlist_read_pointer = acb->mem_base0 + + ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER; + reg->outboundlist_interrupt_cause = acb->mem_base0 + + ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE; + reg->outboundlist_interrupt_enable = acb->mem_base0 + + ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE; + reg->message_wbuffer = acb->mem_base0 + ARCMSR_ARC1214_MESSAGE_WBUFFER; + reg->message_rbuffer = acb->mem_base0 + ARCMSR_ARC1214_MESSAGE_RBUFFER; + reg->msgcode_rwbuffer = acb->mem_base0 + + ARCMSR_ARC1214_MESSAGE_RWBUFFER; + iop_firm_model = (char __iomem *)(®->msgcode_rwbuffer[15]); + iop_firm_version = (char __iomem *)(®->msgcode_rwbuffer[17]); + iop_device_map = (char __iomem *)(®->msgcode_rwbuffer[21]); + if (readl(acb->pmuD->outbound_doorbell) & + ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) { + writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, + acb->pmuD->outbound_doorbell);/*clear interrupt*/ + } + /* post "get config" instruction */ + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0); + /* wait message ready */ + if (!arcmsr_hbaD_wait_msgint_ready(acb)) { + pr_notice("arcmsr%d: wait get adapter firmware " + "miscellaneous data timeout\n", acb->host->host_no); + dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize, + acb->dma_coherent2, acb->dma_coherent_handle2); + return false; + } + count = 8; + while (count) { + *acb_firm_model = readb(iop_firm_model); + acb_firm_model++; + iop_firm_model++; + count--; + } + count = 16; + while (count) { + *acb_firm_version = readb(iop_firm_version); + acb_firm_version++; + iop_firm_version++; + count--; + } + count = 16; + while (count) { + *acb_device_map = readb(iop_device_map); + acb_device_map++; + iop_device_map++; + count--; + } + acb->signature = readl(®->msgcode_rwbuffer[1]); + /*firm_signature,1,00-03*/ + acb->firm_request_len = readl(®->msgcode_rwbuffer[2]); + /*firm_request_len,1,04-07*/ + acb->firm_numbers_queue = readl(®->msgcode_rwbuffer[3]); + /*firm_numbers_queue,2,08-11*/ + acb->firm_sdram_size = readl(®->msgcode_rwbuffer[4]); + 
/*firm_sdram_size,3,12-15*/ + acb->firm_hd_channels = readl(®->msgcode_rwbuffer[5]); + /*firm_hd_channels,4,16-19*/ + acb->firm_cfg_version = readl(®->msgcode_rwbuffer[25]); + pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n", + acb->host->host_no, + acb->firm_model, + acb->firm_version); + return true; +} + static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) { bool rtn = false; @@ -2536,6 +3021,9 @@ static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_C: rtn = arcmsr_hbaC_get_config(acb); break; + case ACB_ADAPTER_TYPE_D: + rtn = arcmsr_hbaD_get_config(acb); + break; default: break; } @@ -2725,6 +3213,89 @@ polling_hbc_ccb_retry: } return rtn; } + +static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb, + struct CommandControlBlock *poll_ccb) +{ + bool error; + uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb, ccb_cdb_phy; + int rtn, doneq_index, index_stripped, outbound_write_pointer; + unsigned long flags; + struct ARCMSR_CDB *arcmsr_cdb; + struct CommandControlBlock *pCCB; + struct MessageUnit_D *pmu = acb->pmuD; + +polling_hbaD_ccb_retry: + poll_count++; + while (1) { + outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1; + doneq_index = pmu->doneq_index; + if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) { + if (poll_ccb_done) { + rtn = SUCCESS; + break; + } else { + msleep(25); + if (poll_count > 40) { + rtn = FAILED; + break; + } + goto polling_hbaD_ccb_retry; + } + } + spin_lock_irqsave(&acb->doneq_lock, flags); + if (doneq_index & 0x4000) { + index_stripped = doneq_index & 0xFFF; + index_stripped += 1; + index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; + pmu->doneq_index = index_stripped ? + (index_stripped | 0x4000) : + (index_stripped + 1); + } else { + index_stripped = doneq_index; + index_stripped += 1; + index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; + pmu->doneq_index = index_stripped ? index_stripped : + ((index_stripped | 0x4000) + 1); + } + spin_unlock_irqrestore(&acb->doneq_lock, flags); + doneq_index = pmu->doneq_index; + flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow; + ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); + arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + + ccb_cdb_phy); + pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, + arcmsr_cdb); + poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0; + if ((pCCB->acb != acb) || + (pCCB->startdone != ARCMSR_CCB_START)) { + if (pCCB->startdone == ARCMSR_CCB_ABORTED) { + pr_notice("arcmsr%d: scsi id = %d " + "lun = %d ccb = '0x%p' poll command " + "abort successfully\n" + , acb->host->host_no + , pCCB->pcmd->device->id + , (u32)pCCB->pcmd->device->lun + , pCCB); + pCCB->pcmd->result = DID_ABORT << 16; + arcmsr_ccb_complete(pCCB); + continue; + } + pr_notice("arcmsr%d: polling an illegal " + "ccb command done ccb = '0x%p' " + "ccboutstandingcount = %d\n" + , acb->host->host_no + , pCCB + , atomic_read(&acb->ccboutstandingcount)); + continue; + } + error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) + ? 
true : false; + arcmsr_report_ccb_state(acb, pCCB, error); + } + return rtn; +} + static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb) { @@ -2743,6 +3314,10 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, case ACB_ADAPTER_TYPE_C: { rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb); } + break; + case ACB_ADAPTER_TYPE_D: + rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb); + break; } return rtn; } @@ -2760,6 +3335,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) */ switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_B: + case ACB_ADAPTER_TYPE_D: dma_coherent_handle = acb->dma_coherent_handle2; break; default: @@ -2849,6 +3425,27 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) } } } + break; + case ACB_ADAPTER_TYPE_D: { + uint32_t __iomem *rwbuffer; + struct MessageUnit_D *reg = acb->pmuD; + reg->postq_index = 0; + reg->doneq_index = 0; + rwbuffer = reg->msgcode_rwbuffer; + writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++); + writel(cdb_phyaddr_hi32, rwbuffer++); + writel(cdb_phyaddr, rwbuffer++); + writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE * + sizeof(struct InBound_SRB)), rwbuffer++); + writel(0x100, rwbuffer); + writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0); + if (!arcmsr_hbaD_wait_msgint_ready(acb)) { + pr_notice("arcmsr%d: 'set command Q window' timeout\n", + acb->host->host_no); + return 1; + } + } + break; } return 0; } @@ -2880,6 +3477,15 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) firmware_state = readl(®->outbound_msgaddr1); } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0); } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + do { + firmware_state = readl(reg->outbound_msgaddr1); + } while ((firmware_state & + ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0); + } + break; } } @@ -2950,6 +3556,35 @@ static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb) return; } +static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb) +{ + struct MessageUnit_D *reg = acb->pmuD; + + if (unlikely(atomic_read(&acb->rq_map_token) == 0) || + ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || + ((acb->acb_flags & ACB_F_ABORT) != 0)) { + mod_timer(&acb->eternal_timer, + jiffies + msecs_to_jiffies(6 * HZ)); + } else { + acb->fw_flag = FW_NORMAL; + if (atomic_read(&acb->ante_token_value) == + atomic_read(&acb->rq_map_token)) { + atomic_set(&acb->rq_map_token, 16); + } + atomic_set(&acb->ante_token_value, + atomic_read(&acb->rq_map_token)); + if (atomic_dec_and_test(&acb->rq_map_token)) { + mod_timer(&acb->eternal_timer, jiffies + + msecs_to_jiffies(6 * HZ)); + return; + } + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, + reg->inbound_msgaddr0); + mod_timer(&acb->eternal_timer, jiffies + + msecs_to_jiffies(6 * HZ)); + } +} + static void arcmsr_request_device_map(unsigned long pacb) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb; @@ -2965,6 +3600,10 @@ static void arcmsr_request_device_map(unsigned long pacb) case ACB_ADAPTER_TYPE_C: { arcmsr_hbaC_request_device_map(acb); } + break; + case ACB_ADAPTER_TYPE_D: + arcmsr_hbaD_request_device_map(acb); + break; } } @@ -3002,6 +3641,19 @@ static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB) } return; } + +static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_D *pmu = pACB->pmuD; + + pACB->acb_flags |= ACB_F_MSG_START_BGRB; + 
writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0); + if (!arcmsr_hbaD_wait_msgint_ready(pACB)) { + pr_notice("arcmsr%d: wait 'start adapter " + "background rebulid' timeout\n", pACB->host->host_no); + } +} + static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { @@ -3013,6 +3665,10 @@ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) break; case ACB_ADAPTER_TYPE_C: arcmsr_hbaC_start_bgrb(acb); + break; + case ACB_ADAPTER_TYPE_D: + arcmsr_hbaD_start_bgrb(acb); + break; } } @@ -3058,6 +3714,29 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb) break; } } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + uint32_t outbound_doorbell, i; + /* empty doorbell Qbuffer if door bell ringed */ + outbound_doorbell = readl(reg->outbound_doorbell); + writel(outbound_doorbell, reg->outbound_doorbell); + writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ, + reg->inbound_doorbell); + for (i = 0; i < 200; i++) { + msleep(20); + outbound_doorbell = readl(reg->outbound_doorbell); + if (outbound_doorbell & + ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) { + writel(outbound_doorbell, + reg->outbound_doorbell); + writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ, + reg->inbound_doorbell); + } else + break; + } + } + break; } } @@ -3088,6 +3767,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb) int i, count = 0; struct MessageUnit_A __iomem *pmuA = acb->pmuA; struct MessageUnit_C __iomem *pmuC = acb->pmuC; + struct MessageUnit_D *pmuD = acb->pmuD; /* backup pci config data */ printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no); @@ -3108,6 +3788,8 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb) writel(0xD, &pmuC->write_sequence); } while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5)); writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic); + } else if ((acb->dev_id == 0x1214)) { + writel(0x20, pmuD->reset_request); } else { pci_write_config_byte(acb->pdev, 0x84, 0x20); } @@ -3304,6 +3986,66 @@ sleep: } break; } + case ACB_ADAPTER_TYPE_D: { + if (acb->acb_flags & ACB_F_BUS_RESET) { + long timeout; + pr_notice("arcmsr: there is an bus reset" + " eh proceeding.......\n"); + timeout = wait_event_timeout(wait_q, (acb->acb_flags + & ACB_F_BUS_RESET) == 0, 220 * HZ); + if (timeout) + return SUCCESS; + } + acb->acb_flags |= ACB_F_BUS_RESET; + if (!arcmsr_iop_reset(acb)) { + struct MessageUnit_D *reg; + reg = acb->pmuD; + arcmsr_hardware_reset(acb); + acb->acb_flags &= ~ACB_F_IOP_INITED; + nap: + ssleep(ARCMSR_SLEEPTIME); + if ((readl(reg->sample_at_reset) & 0x80) != 0) { + pr_err("arcmsr%d: waiting for " + "hw bus reset return, retry=%d\n", + acb->host->host_no, retry_count); + if (retry_count > ARCMSR_RETRYCOUNT) { + acb->fw_flag = FW_DEADLOCK; + pr_err("arcmsr%d: waiting for hw bus" + " reset return, " + "RETRY TERMINATED!!\n", + acb->host->host_no); + return FAILED; + } + retry_count++; + goto nap; + } + acb->acb_flags |= ACB_F_IOP_INITED; + /* disable all outbound interrupt */ + intmask_org = arcmsr_disable_outbound_ints(acb); + arcmsr_get_firmware_spec(acb); + arcmsr_start_adapter_bgrb(acb); + arcmsr_clear_doorbell_queue_buffer(acb); + arcmsr_enable_outbound_ints(acb, intmask_org); + atomic_set(&acb->rq_map_token, 16); + atomic_set(&acb->ante_token_value, 16); + acb->fw_flag = FW_NORMAL; + mod_timer(&acb->eternal_timer, + jiffies + msecs_to_jiffies(6 * HZ)); + acb->acb_flags 
&= ~ACB_F_BUS_RESET; + rtn = SUCCESS; + pr_err("arcmsr: scsi bus reset " + "eh returns with success\n"); + } else { + acb->acb_flags &= ~ACB_F_BUS_RESET; + atomic_set(&acb->rq_map_token, 16); + atomic_set(&acb->ante_token_value, 16); + acb->fw_flag = FW_NORMAL; + mod_timer(&acb->eternal_timer, + jiffies + msecs_to_jiffies(6 * HZ)); + rtn = SUCCESS; + } + break; + } } return rtn; } @@ -3380,6 +4122,7 @@ static const char *arcmsr_info(struct Scsi_Host *host) case PCI_DEVICE_ID_ARECA_1280: type = "SATA"; break; + case PCI_DEVICE_ID_ARECA_1214: case PCI_DEVICE_ID_ARECA_1380: case PCI_DEVICE_ID_ARECA_1381: case PCI_DEVICE_ID_ARECA_1680: -- cgit From b4eb6ae9075a958ffe24620f985f6bd729a1b138 Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Tue, 19 Aug 2014 15:28:36 +0800 Subject: arcmsr: call scsi_scan_host at the end of host initialization Call scsi_scan_host at the end of host initialization and fix and error path to free allocated resource. Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr_hba.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index b3cb969c72d5..3d3cdfe07089 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -112,6 +112,7 @@ static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb); static void arcmsr_hardware_reset(struct AdapterControlBlock *acb); static const char *arcmsr_info(struct Scsi_Host *); static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); +static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *); static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth, int reason) { @@ -755,12 +756,11 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) } error = scsi_add_host(host, &pdev->dev); if(error){ - goto RAID_controller_stop; + goto free_ccb_pool; } if (arcmsr_request_irq(pdev, acb) == FAILED) goto scsi_host_remove; arcmsr_iop_init(acb); - scsi_scan_host(host); INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn); atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); @@ -772,13 +772,17 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) add_timer(&acb->eternal_timer); if(arcmsr_alloc_sysfs_attr(acb)) goto out_free_sysfs; + scsi_scan_host(host); return 0; out_free_sysfs: -scsi_host_remove: - scsi_remove_host(host); -RAID_controller_stop: + del_timer_sync(&acb->eternal_timer); + flush_work(&acb->arcmsr_do_message_isr_bh); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); + arcmsr_free_irq(pdev, acb); +scsi_host_remove: + scsi_remove_host(host); +free_ccb_pool: arcmsr_free_ccb_pool(acb); free_hbb_mu: arcmsr_free_mu(acb); -- cgit From 3b8155d582968f79a62c79358d5e137f99f04407 Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Mon, 15 Sep 2014 19:05:33 +0800 Subject: arcmsr: simplify of updating doneq_index and postq_index Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr_hba.c | 121 +++++++++++++-------------------------- 1 file changed, 40 insertions(+), 81 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 3d3cdfe07089..0dd38ccc9470 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -1120,7 +1120,7 @@ static void arcmsr_drain_donequeue(struct 
AdapterControlBlock *acb, struct Comma static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) { int i = 0; - uint32_t flag_ccb; + uint32_t flag_ccb, ccb_cdb_phy; struct ARCMSR_CDB *pARCMSR_CDB; bool error; struct CommandControlBlock *pCCB; @@ -1164,10 +1164,6 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C __iomem *reg = acb->pmuC; - struct ARCMSR_CDB *pARCMSR_CDB; - uint32_t flag_ccb, ccb_cdb_phy; - bool error; - struct CommandControlBlock *pCCB; while ((readl(®->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { /*need to do*/ flag_ccb = readl(®->outbound_queueport_low); @@ -1181,35 +1177,25 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) break; case ACB_ADAPTER_TYPE_D: { struct MessageUnit_D *pmu = acb->pmuD; - uint32_t ccb_cdb_phy, outbound_write_pointer; - uint32_t doneq_index, index_stripped, addressLow, residual; - bool error; - struct CommandControlBlock *pCCB; + uint32_t outbound_write_pointer; + uint32_t doneq_index, index_stripped, addressLow, residual, toggle; + unsigned long flags; - outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1; - doneq_index = pmu->doneq_index; residual = atomic_read(&acb->ccboutstandingcount); for (i = 0; i < residual; i++) { - while ((doneq_index & 0xFFF) != + spin_lock_irqsave(&acb->doneq_lock, flags); + outbound_write_pointer = + pmu->done_qbuffer[0].addressLow + 1; + doneq_index = pmu->doneq_index; + if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) { - if (doneq_index & 0x4000) { - index_stripped = doneq_index & 0xFFF; - index_stripped += 1; - index_stripped %= - ARCMSR_MAX_ARC1214_DONEQUEUE; - pmu->doneq_index = index_stripped ? - (index_stripped | 0x4000) : - (index_stripped + 1); - } else { - index_stripped = doneq_index; - index_stripped += 1; - index_stripped %= - ARCMSR_MAX_ARC1214_DONEQUEUE; - pmu->doneq_index = index_stripped ? - index_stripped : - ((index_stripped | 0x4000) + 1); - } + toggle = doneq_index & 0x4000; + index_stripped = (doneq_index & 0xFFF) + 1; + index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; + pmu->doneq_index = index_stripped ? 
(index_stripped | toggle) : + ((toggle ^ 0x4000) + 1); doneq_index = pmu->doneq_index; + spin_unlock_irqrestore(&acb->doneq_lock, flags); addressLow = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow; ccb_cdb_phy = (addressLow & 0xFFFFFFF0); @@ -1223,11 +1209,10 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) arcmsr_drain_donequeue(acb, pCCB, error); writel(doneq_index, pmu->outboundlist_read_pointer); + } else { + spin_unlock_irqrestore(&acb->doneq_lock, flags); + mdelay(10); } - mdelay(10); - outbound_write_pointer = - pmu->done_qbuffer[0].addressLow + 1; - doneq_index = pmu->doneq_index; } pmu->postq_index = 0; pmu->doneq_index = 0x40FF; @@ -1460,7 +1445,7 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr case ACB_ADAPTER_TYPE_D: { struct MessageUnit_D *pmu = acb->pmuD; u16 index_stripped; - u16 postq_index; + u16 postq_index, toggle; unsigned long flags; struct InBound_SRB *pinbound_srb; @@ -1471,19 +1456,11 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr pinbound_srb->addressLow = dma_addr_lo32(cdb_phyaddr); pinbound_srb->length = ccb->arc_cdb_size >> 2; arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr); - if (postq_index & 0x4000) { - index_stripped = postq_index & 0xFF; - index_stripped += 1; - index_stripped %= ARCMSR_MAX_ARC1214_POSTQUEUE; - pmu->postq_index = index_stripped ? - (index_stripped | 0x4000) : index_stripped; - } else { - index_stripped = postq_index; - index_stripped += 1; - index_stripped %= ARCMSR_MAX_ARC1214_POSTQUEUE; - pmu->postq_index = index_stripped ? index_stripped : - (index_stripped | 0x4000); - } + toggle = postq_index & 0x4000; + index_stripped = postq_index + 1; + index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1); + pmu->postq_index = index_stripped ? (index_stripped | toggle) : + (toggle ^ 0x4000); writel(postq_index, pmu->inboundlist_write_pointer); spin_unlock_irqrestore(&acb->postq_lock, flags); break; @@ -1999,7 +1976,7 @@ static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb) static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb) { - u32 outbound_write_pointer, doneq_index, index_stripped; + u32 outbound_write_pointer, doneq_index, index_stripped, toggle; uint32_t addressLow, ccb_cdb_phy; int error; struct MessageUnit_D *pmu; @@ -2013,21 +1990,11 @@ static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb) doneq_index = pmu->doneq_index; if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) { do { - if (doneq_index & 0x4000) { - index_stripped = doneq_index & 0xFFF; - index_stripped += 1; - index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; - pmu->doneq_index = index_stripped - ? (index_stripped | 0x4000) : - (index_stripped + 1); - } else { - index_stripped = doneq_index; - index_stripped += 1; - index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; - pmu->doneq_index = index_stripped - ? index_stripped : - ((index_stripped | 0x4000) + 1); - } + toggle = doneq_index & 0x4000; + index_stripped = (doneq_index & 0xFFF) + 1; + index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; + pmu->doneq_index = index_stripped ? 
(index_stripped | toggle) : + ((toggle ^ 0x4000) + 1); doneq_index = pmu->doneq_index; addressLow = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow; @@ -2890,7 +2857,7 @@ static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb) char __iomem *iop_firm_version; char __iomem *iop_device_map; u32 count; - struct MessageUnit_D *reg ; + struct MessageUnit_D *reg; void *dma_coherent2; dma_addr_t dma_coherent_handle2; struct pci_dev *pdev = acb->pdev; @@ -3223,7 +3190,7 @@ static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb, { bool error; uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb, ccb_cdb_phy; - int rtn, doneq_index, index_stripped, outbound_write_pointer; + int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle; unsigned long flags; struct ARCMSR_CDB *arcmsr_cdb; struct CommandControlBlock *pCCB; @@ -3232,9 +3199,11 @@ static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb, polling_hbaD_ccb_retry: poll_count++; while (1) { + spin_lock_irqsave(&acb->doneq_lock, flags); outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1; doneq_index = pmu->doneq_index; if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) { + spin_unlock_irqrestore(&acb->doneq_lock, flags); if (poll_ccb_done) { rtn = SUCCESS; break; @@ -3247,23 +3216,13 @@ polling_hbaD_ccb_retry: goto polling_hbaD_ccb_retry; } } - spin_lock_irqsave(&acb->doneq_lock, flags); - if (doneq_index & 0x4000) { - index_stripped = doneq_index & 0xFFF; - index_stripped += 1; - index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; - pmu->doneq_index = index_stripped ? - (index_stripped | 0x4000) : - (index_stripped + 1); - } else { - index_stripped = doneq_index; - index_stripped += 1; - index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; - pmu->doneq_index = index_stripped ? index_stripped : - ((index_stripped | 0x4000) + 1); - } - spin_unlock_irqrestore(&acb->doneq_lock, flags); + toggle = doneq_index & 0x4000; + index_stripped = (doneq_index & 0xFFF) + 1; + index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; + pmu->doneq_index = index_stripped ? (index_stripped | toggle) : + ((toggle ^ 0x4000) + 1); doneq_index = pmu->doneq_index; + spin_unlock_irqrestore(&acb->doneq_lock, flags); flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow; ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + -- cgit From f78f694c34ae6fdc29f5e80abb2dbb894f961772 Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Fri, 19 Sep 2014 12:17:27 +0530 Subject: mpt2sas: fix undefined reference to `__udivdi3' compilation errors This patch will fix the below compilation errors on i386 ARCH drivers/built-in.o: In function `_scsih_qcmd': mpt2sas_scsih.c:(.text+0x1e7b56): undefined reference to `__udivdi3' mpt2sas_scsih.c:(.text+0x1e7b8a): undefined reference to `__umoddi3' Used sector_div() API to fix above compilation errors. Signed-off-by: Sreekanth Reddy Reviewed-by: Martin K. 
Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 992a224e51ee..c80ed0482649 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -3860,7 +3860,7 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request, u16 smid) { - sector_t v_lba, p_lba, stripe_off, stripe_unit, column, io_size; + sector_t v_lba, p_lba, stripe_off, column, io_size; u32 stripe_sz, stripe_exp; u8 num_pds, cmd = scmd->cmnd[0]; @@ -3888,9 +3888,9 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, num_pds = raid_device->num_pds; p_lba = v_lba >> stripe_exp; - stripe_unit = p_lba / num_pds; - column = p_lba % num_pds; - p_lba = (stripe_unit << stripe_exp) + stripe_off; + column = sector_div(p_lba, num_pds); + p_lba = (p_lba << stripe_exp) + stripe_off; + mpi_request->DevHandle = cpu_to_le16(raid_device->pd_handle[column]); if (cmd == READ_10 || cmd == WRITE_10) -- cgit From 6c1e7b7729b19eb41d93a411c82126a5993a8e90 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Tue, 2 Sep 2014 16:43:16 -0700 Subject: storvsc: get rid of overly verbose warning messages Get rid of the warning messages since they will clutter up various system logs and are of questionable value to the end user. For debugging purposes, this information can be gotten by setting the scsi log level appropriately. Signed-off-by: K. Y. Srinivasan Reviewed-by: Olaf Hering Signed-off-by: Christoph Hellwig --- drivers/scsi/storvsc_drv.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index fecac5d03fdd..733e5f759518 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1152,24 +1152,12 @@ static void storvsc_on_io_completion(struct hv_device *device, stor_pkt->vm_srb.sense_info_length = vstor_packet->vm_srb.sense_info_length; - if (vstor_packet->vm_srb.scsi_status != 0 || - vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS){ - dev_warn(&device->device, - "cmd 0x%x scsi status 0x%x srb status 0x%x\n", - stor_pkt->vm_srb.cdb[0], - vstor_packet->vm_srb.scsi_status, - vstor_packet->vm_srb.srb_status); - } if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) { /* CHECK_CONDITION */ if (vstor_packet->vm_srb.srb_status & SRB_STATUS_AUTOSENSE_VALID) { /* autosense data available */ - dev_warn(&device->device, - "stor pkt %p autosense data valid - len %d\n", - request, - vstor_packet->vm_srb.sense_info_length); memcpy(request->sense_buffer, vstor_packet->vm_srb.sense_data, -- cgit From 576b586303f76be9e5e2f5266ccbeba1b46bbd06 Mon Sep 17 00:00:00 2001 From: Anish Bhatt Date: Mon, 15 Sep 2014 17:44:18 -0700 Subject: cxgb4i: avoid holding mutex in interrupt context cxgbi_inet6addr_handler() can be called in interrupt context, so use rcu protected list while finding netdev Applies on top of core-for-3.18 Signed-off-by: Anish Bhatt Signed-off-by: Karen Xie Fixes: fc8d0590d914 ("libcxgbi: Add ipv6 api to driver") Fixes: 759a0cc5a3e1 ("cxgb4i: Add ipv6 code to driver, call into libcxgbi ipv6 api") Signed-off-by: Christoph Hellwig --- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 2 +- drivers/scsi/cxgbi/libcxgbi.c | 57 ++++++++++++++++++++++++++++++++++---- drivers/scsi/cxgbi/libcxgbi.h | 3 ++ 3 files changed, 55 insertions(+), 7 deletions(-) diff 
--git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 79788a12712d..02e69e7ee4a3 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1647,7 +1647,7 @@ static int cxgbi_inet6addr_handler(struct notifier_block *this, if (event_dev->priv_flags & IFF_802_1Q_VLAN) event_dev = vlan_dev_real_dev(event_dev); - cdev = cxgbi_device_find_by_netdev(event_dev, NULL); + cdev = cxgbi_device_find_by_netdev_rcu(event_dev, NULL); if (!cdev) return ret; diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 4cc9787f55bd..6a2001d6b442 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -57,6 +57,9 @@ MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)"); static LIST_HEAD(cdev_list); static DEFINE_MUTEX(cdev_mutex); +static LIST_HEAD(cdev_rcu_list); +static DEFINE_SPINLOCK(cdev_rcu_lock); + int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, unsigned int max_conn) { @@ -142,6 +145,10 @@ struct cxgbi_device *cxgbi_device_register(unsigned int extra, list_add_tail(&cdev->list_head, &cdev_list); mutex_unlock(&cdev_mutex); + spin_lock(&cdev_rcu_lock); + list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list); + spin_unlock(&cdev_rcu_lock); + log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p# %u.\n", cdev, nports); return cdev; @@ -153,9 +160,16 @@ void cxgbi_device_unregister(struct cxgbi_device *cdev) log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p# %u,%s.\n", cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : ""); + mutex_lock(&cdev_mutex); list_del(&cdev->list_head); mutex_unlock(&cdev_mutex); + + spin_lock(&cdev_rcu_lock); + list_del_rcu(&cdev->rcu_node); + spin_unlock(&cdev_rcu_lock); + synchronize_rcu(); + cxgbi_device_destroy(cdev); } EXPORT_SYMBOL_GPL(cxgbi_device_unregister); @@ -167,12 +181,9 @@ void cxgbi_device_unregister_all(unsigned int flag) mutex_lock(&cdev_mutex); list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { if ((cdev->flags & flag) == flag) { - log_debug(1 << CXGBI_DBG_DEV, - "cdev 0x%p, p# %u,%s.\n", - cdev, cdev->nports, cdev->nports ? 
- cdev->ports[0]->name : ""); - list_del(&cdev->list_head); - cxgbi_device_destroy(cdev); + mutex_unlock(&cdev_mutex); + cxgbi_device_unregister(cdev); + mutex_lock(&cdev_mutex); } } mutex_unlock(&cdev_mutex); @@ -191,6 +202,7 @@ struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev) } } mutex_unlock(&cdev_mutex); + log_debug(1 << CXGBI_DBG_DEV, "lldev 0x%p, NO match found.\n", lldev); return NULL; @@ -230,6 +242,39 @@ struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, } EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev); +struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev, + int *port) +{ + struct net_device *vdev = NULL; + struct cxgbi_device *cdev; + int i; + + if (ndev->priv_flags & IFF_802_1Q_VLAN) { + vdev = ndev; + ndev = vlan_dev_real_dev(ndev); + pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name); + } + + rcu_read_lock(); + list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) { + for (i = 0; i < cdev->nports; i++) { + if (ndev == cdev->ports[i]) { + cdev->hbas[i]->vdev = vdev; + rcu_read_unlock(); + if (port) + *port = i; + return cdev; + } + } + } + rcu_read_unlock(); + + log_debug(1 << CXGBI_DBG_DEV, + "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); + return NULL; +} +EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu); + static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev, int *port) { diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index b3e6e7541cc5..1d98fad6a0ab 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -527,6 +527,7 @@ struct cxgbi_ports_map { #define CXGBI_FLAG_IPV4_SET 0x10 struct cxgbi_device { struct list_head list_head; + struct list_head rcu_node; unsigned int flags; struct net_device **ports; void *lldev; @@ -709,6 +710,8 @@ void cxgbi_device_unregister(struct cxgbi_device *); void cxgbi_device_unregister_all(unsigned int flag); struct cxgbi_device *cxgbi_device_find_by_lldev(void *); struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *); +struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *, + int *); int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int, struct scsi_host_template *, struct scsi_transport_template *); -- cgit From 3185ea63907cb281cfdc5aa29aa2d855826ad16a Mon Sep 17 00:00:00 2001 From: "wenxiong@linux.vnet.ibm.com" Date: Wed, 24 Sep 2014 16:25:47 -0500 Subject: ipr: don't log error messages when applications issues illegal requests Failing Device information are logged when IOA firmware detected these illegal request such as IOA firmware doesn't support inquiry with page code 2. The patch fixes the issue. 
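
In essence, the handler now returns early, skipping ipr_hcam_err(), when the IOASC reports a failed hardware command whose sense key is ILLEGAL_REQUEST and the adapter is running at or below the default log level. The user-space sketch below illustrates only that decision; should_suppress(), sense_key() and DEFAULT_LOG_LEVEL are illustrative stand-ins (the real constants and the be32 handling appear in the hunk that follows), and the first sense word is assumed to have already been converted to CPU byte order.

	/* Minimal sketch of the suppression check, not the driver code itself. */
	#include <stdint.h>
	#include <stdio.h>

	#define ILLEGAL_REQUEST      0x05        /* SCSI sense key */
	#define IOASC_HW_CMD_FAILED  0x046E0000  /* mirrors the new ipr.h define */
	#define DEFAULT_LOG_LEVEL    2           /* assumed default log level */

	/* Mirrors (be32_to_cpu(sense_data[0]) & 0x0000ff00) >> 8 from the patch,
	 * with the endian conversion assumed to have been done already. */
	static uint8_t sense_key(uint32_t sense_word0)
	{
		return (sense_word0 & 0x0000ff00u) >> 8;
	}

	static int should_suppress(uint32_t ioasc, uint32_t sense_word0,
				   unsigned int log_level)
	{
		return ioasc == IOASC_HW_CMD_FAILED &&
		       sense_key(sense_word0) == ILLEGAL_REQUEST &&
		       log_level <= DEFAULT_LOG_LEVEL;
	}

	int main(void)
	{
		/* sense key 0x5 (ILLEGAL REQUEST) sitting in bits 8..15 */
		uint32_t word0 = 0x05 << 8;

		printf("suppress logging: %s\n",
		       should_suppress(IOASC_HW_CMD_FAILED, word0, 2) ? "yes" : "no");
		return 0;
	}
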
Signed-off-by: Brian King Tested-by: Wen Xiong Signed-off-by: Christoph Hellwig --- drivers/scsi/ipr.c | 10 ++++++++++ drivers/scsi/ipr.h | 1 + 2 files changed, 11 insertions(+) diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 924b0ba74dfe..2a9578c116b7 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -2440,6 +2440,7 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, { u32 ioasc; int error_index; + struct ipr_hostrcb_type_21_error *error; if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY) return; @@ -2464,6 +2465,15 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, if (!ipr_error_table[error_index].log_hcam) return; + if (ioasc == IPR_IOASC_HW_CMD_FAILED && + hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) { + error = &hostrcb->hcam.u.error64.u.type_21_error; + + if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST && + ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) + return; + } + ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error); /* Set indication we have logged an error */ diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 31ed126f7143..8f8960d0bd73 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -130,6 +130,7 @@ #define IPR_IOASC_HW_DEV_BUS_STATUS 0x04448500 #define IPR_IOASC_IOASC_MASK 0xFFFFFF00 #define IPR_IOASC_SCSI_STATUS_MASK 0x000000FF +#define IPR_IOASC_HW_CMD_FAILEd 0x046E0000 #define IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT 0x05240000 #define IPR_IOASC_IR_RESOURCE_HANDLE 0x05250000 #define IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA 0x05258100 -- cgit From 2e9feb434a66311f30bde2430a5609e9e24df62b Mon Sep 17 00:00:00 2001 From: Ching Huang Date: Wed, 24 Sep 2014 17:33:34 +0800 Subject: arcmsr: simplify ioctl data read/write Signed-off-by: Ching Huang Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- drivers/scsi/arcmsr/arcmsr.h | 13 +-- drivers/scsi/arcmsr/arcmsr_attr.c | 127 ++++++++--------------- drivers/scsi/arcmsr/arcmsr_hba.c | 208 +++++++++++++------------------------- 3 files changed, 119 insertions(+), 229 deletions(-) diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index d1c78efc202e..3bcaaac0ae4b 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -52,7 +52,7 @@ struct device_attribute; #define ARCMSR_MAX_FREECCB_NUM 320 #define ARCMSR_MAX_OUTSTANDING_CMD 255 #endif -#define ARCMSR_DRIVER_VERSION "v1.30.00.04-20140428" +#define ARCMSR_DRIVER_VERSION "v1.30.00.04-20140919" #define ARCMSR_SCSI_INITIATOR_ID 255 #define ARCMSR_MAX_XFER_SECTORS 512 #define ARCMSR_MAX_XFER_SECTORS_B 4096 @@ -107,10 +107,11 @@ struct CMD_MESSAGE ** IOP Message Transfer Data for user space ******************************************************************************* */ +#define ARCMSR_API_DATA_BUFLEN 1032 struct CMD_MESSAGE_FIELD { struct CMD_MESSAGE cmdmessage; - uint8_t messagedatabuffer[1032]; + uint8_t messagedatabuffer[ARCMSR_API_DATA_BUFLEN]; }; /* IOP message transfer */ #define ARCMSR_MESSAGE_FAIL 0x0001 @@ -678,15 +679,15 @@ struct AdapterControlBlock unsigned int uncache_size; uint8_t rqbuffer[ARCMSR_MAX_QBUFFER]; /* data collection buffer for read from 80331 */ - int32_t rqbuf_firstindex; + int32_t rqbuf_getIndex; /* first of read buffer */ - int32_t rqbuf_lastindex; + int32_t rqbuf_putIndex; /* last of read buffer */ uint8_t wqbuffer[ARCMSR_MAX_QBUFFER]; /* data collection buffer for write to 80331 */ - int32_t wqbuf_firstindex; + int32_t wqbuf_getIndex; /* first of write buffer */ - int32_t 
wqbuf_lastindex; + int32_t wqbuf_putIndex; /* last of write buffer */ uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN]; /* id0 ..... id15, lun0...lun7 */ diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c index 16422adcc531..9c86481f779f 100644 --- a/drivers/scsi/arcmsr/arcmsr_attr.c +++ b/drivers/scsi/arcmsr/arcmsr_attr.c @@ -50,6 +50,7 @@ #include #include #include +#include #include #include @@ -68,7 +69,7 @@ static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp, struct device *dev = container_of(kobj,struct device,kobj); struct Scsi_Host *host = class_to_shost(dev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; - uint8_t *pQbuffer,*ptmpQbuffer; + uint8_t *ptmpQbuffer; int32_t allxfer_len = 0; unsigned long flags; @@ -78,57 +79,22 @@ static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp, /* do message unit read. */ ptmpQbuffer = (uint8_t *)buf; spin_lock_irqsave(&acb->rqbuffer_lock, flags); - if (acb->rqbuf_firstindex != acb->rqbuf_lastindex) { - pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; - if (acb->rqbuf_firstindex > acb->rqbuf_lastindex) { - if ((ARCMSR_MAX_QBUFFER - acb->rqbuf_firstindex) >= 1032) { - memcpy(ptmpQbuffer, pQbuffer, 1032); - acb->rqbuf_firstindex += 1032; - acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; - allxfer_len = 1032; - } else { - if (((ARCMSR_MAX_QBUFFER - acb->rqbuf_firstindex) - + acb->rqbuf_lastindex) > 1032) { - memcpy(ptmpQbuffer, pQbuffer, - ARCMSR_MAX_QBUFFER - - acb->rqbuf_firstindex); - ptmpQbuffer += ARCMSR_MAX_QBUFFER - - acb->rqbuf_firstindex; - memcpy(ptmpQbuffer, acb->rqbuffer, 1032 - - (ARCMSR_MAX_QBUFFER - - acb->rqbuf_firstindex)); - acb->rqbuf_firstindex = 1032 - - (ARCMSR_MAX_QBUFFER - - acb->rqbuf_firstindex); - allxfer_len = 1032; - } else { - memcpy(ptmpQbuffer, pQbuffer, - ARCMSR_MAX_QBUFFER - - acb->rqbuf_firstindex); - ptmpQbuffer += ARCMSR_MAX_QBUFFER - - acb->rqbuf_firstindex; - memcpy(ptmpQbuffer, acb->rqbuffer, - acb->rqbuf_lastindex); - allxfer_len = ARCMSR_MAX_QBUFFER - - acb->rqbuf_firstindex + - acb->rqbuf_lastindex; - acb->rqbuf_firstindex = - acb->rqbuf_lastindex; - } - } - } else { - if ((acb->rqbuf_lastindex - acb->rqbuf_firstindex) > 1032) { - memcpy(ptmpQbuffer, pQbuffer, 1032); - acb->rqbuf_firstindex += 1032; - allxfer_len = 1032; - } else { - memcpy(ptmpQbuffer, pQbuffer, acb->rqbuf_lastindex - - acb->rqbuf_firstindex); - allxfer_len = acb->rqbuf_lastindex - - acb->rqbuf_firstindex; - acb->rqbuf_firstindex = acb->rqbuf_lastindex; - } + if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) { + unsigned int tail = acb->rqbuf_getIndex; + unsigned int head = acb->rqbuf_putIndex; + unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER); + + allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER); + if (allxfer_len > ARCMSR_API_DATA_BUFLEN) + allxfer_len = ARCMSR_API_DATA_BUFLEN; + + if (allxfer_len <= cnt_to_end) + memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len); + else { + memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end); + memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end); } + acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER; } if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { struct QBUFFER __iomem *prbuffer; @@ -150,47 +116,42 @@ static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp, struct device *dev = container_of(kobj,struct device,kobj); struct Scsi_Host *host = class_to_shost(dev); struct AdapterControlBlock *acb = (struct 
AdapterControlBlock *) host->hostdata; - int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; + int32_t user_len, cnt2end; uint8_t *pQbuffer, *ptmpuserbuffer; unsigned long flags; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (count > 1032) + if (count > ARCMSR_API_DATA_BUFLEN) return -EINVAL; /* do message unit write. */ ptmpuserbuffer = (uint8_t *)buf; user_len = (int32_t)count; spin_lock_irqsave(&acb->wqbuffer_lock, flags); - wqbuf_lastindex = acb->wqbuf_lastindex; - wqbuf_firstindex = acb->wqbuf_firstindex; - if (wqbuf_lastindex != wqbuf_firstindex) { + if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) { arcmsr_write_ioctldata2iop(acb); spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); return 0; /*need retry*/ } else { - my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) - &(ARCMSR_MAX_QBUFFER - 1); - if (my_empty_len >= user_len) { - while (user_len > 0) { - pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; - memcpy(pQbuffer, ptmpuserbuffer, 1); - acb->wqbuf_lastindex++; - acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; - ptmpuserbuffer++; - user_len--; - } - if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { - acb->acb_flags &= - ~ACB_F_MESSAGE_WQBUFFER_CLEARED; - arcmsr_write_ioctldata2iop(acb); - } - spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); - return count; - } else { - spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); - return 0; /*need retry*/ + pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex]; + cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex; + if (user_len > cnt2end) { + memcpy(pQbuffer, ptmpuserbuffer, cnt2end); + ptmpuserbuffer += cnt2end; + user_len -= cnt2end; + acb->wqbuf_putIndex = 0; + pQbuffer = acb->wqbuffer; } + memcpy(pQbuffer, ptmpuserbuffer, user_len); + acb->wqbuf_putIndex += user_len; + acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER; + if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { + acb->acb_flags &= + ~ACB_F_MESSAGE_WQBUFFER_CLEARED; + arcmsr_write_ioctldata2iop(acb); + } + spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); + return count; } } @@ -215,12 +176,12 @@ static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp, | ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READED); spin_lock_irqsave(&acb->rqbuffer_lock, flags); - acb->rqbuf_firstindex = 0; - acb->rqbuf_lastindex = 0; + acb->rqbuf_getIndex = 0; + acb->rqbuf_putIndex = 0; spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); spin_lock_irqsave(&acb->wqbuffer_lock, flags); - acb->wqbuf_firstindex = 0; - acb->wqbuf_lastindex = 0; + acb->wqbuf_getIndex = 0; + acb->wqbuf_putIndex = 0; spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); pQbuffer = acb->rqbuffer; memset(pQbuffer, 0, sizeof (struct QBUFFER)); @@ -234,7 +195,7 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = { .name = "mu_read", .mode = S_IRUSR , }, - .size = 1032, + .size = ARCMSR_API_DATA_BUFLEN, .read = arcmsr_sysfs_iop_message_read, }; @@ -243,7 +204,7 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = { .name = "mu_write", .mode = S_IWUSR, }, - .size = 1032, + .size = ARCMSR_API_DATA_BUFLEN, .write = arcmsr_sysfs_iop_message_write, }; diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 0dd38ccc9470..0b44fb5ee485 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -58,6 +58,7 @@ #include #include #include +#include #include #include #include @@ -1701,16 +1702,15 @@ arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb, buf2 = (uint32_t *)buf1; } while (iop_len > 0) { - 
pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex]; + pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex]; *pQbuffer = *buf1; - acb->rqbuf_lastindex++; + acb->rqbuf_putIndex++; /* if last, index number set it to 0 */ - acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; + acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER; buf1++; iop_len--; } - if (buf2) - kfree(buf2); + kfree(buf2); /* let IOP know data has been read */ arcmsr_iop_message_read(acb); return 1; @@ -1729,10 +1729,10 @@ arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, iop_data = (uint8_t __iomem *)prbuffer->data; iop_len = readl(&prbuffer->data_len); while (iop_len > 0) { - pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex]; + pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex]; *pQbuffer = readb(iop_data); - acb->rqbuf_lastindex++; - acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; + acb->rqbuf_putIndex++; + acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER; iop_data++; iop_len--; } @@ -1748,7 +1748,7 @@ static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) spin_lock_irqsave(&acb->rqbuffer_lock, flags); prbuffer = arcmsr_get_iop_rqbuffer(acb); - buf_empty_len = (acb->rqbuf_lastindex - acb->rqbuf_firstindex - 1) & + buf_empty_len = (acb->rqbuf_putIndex - acb->rqbuf_getIndex - 1) & (ARCMSR_MAX_QBUFFER - 1); if (buf_empty_len >= readl(&prbuffer->data_len)) { if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) @@ -1775,12 +1775,12 @@ static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb) acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); pwbuffer = arcmsr_get_iop_wqbuffer(acb); iop_data = (uint32_t __iomem *)pwbuffer->data; - while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) + while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex) && (allxfer_len < 124)) { - pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex]; + pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex]; *buf1 = *pQbuffer; - acb->wqbuf_firstindex++; - acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; + acb->wqbuf_getIndex++; + acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER; buf1++; allxfer_len++; } @@ -1818,12 +1818,12 @@ arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb) acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); pwbuffer = arcmsr_get_iop_wqbuffer(acb); iop_data = (uint8_t __iomem *)pwbuffer->data; - while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) + while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex) && (allxfer_len < 124)) { - pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex]; + pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex]; writeb(*pQbuffer, iop_data); - acb->wqbuf_firstindex++; - acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; + acb->wqbuf_getIndex++; + acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER; iop_data++; allxfer_len++; } @@ -1838,9 +1838,9 @@ static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) spin_lock_irqsave(&acb->wqbuffer_lock, flags); acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; - if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) + if (acb->wqbuf_getIndex != acb->wqbuf_putIndex) arcmsr_write_ioctldata2iop(acb); - if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) + if (acb->wqbuf_getIndex == acb->wqbuf_putIndex) acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); } @@ -2210,14 +2210,14 @@ void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb) for (i = 0; i < 15; i++) { if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; - acb->rqbuf_firstindex = 0; - acb->rqbuf_lastindex = 0; + 
acb->rqbuf_getIndex = 0; + acb->rqbuf_putIndex = 0; arcmsr_iop_message_read(acb); mdelay(30); - } else if (acb->rqbuf_firstindex != - acb->rqbuf_lastindex) { - acb->rqbuf_firstindex = 0; - acb->rqbuf_lastindex = 0; + } else if (acb->rqbuf_getIndex != + acb->rqbuf_putIndex) { + acb->rqbuf_getIndex = 0; + acb->rqbuf_putIndex = 0; mdelay(30); } else break; @@ -2256,9 +2256,9 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, switch (controlcode) { case ARCMSR_MESSAGE_READ_RQBUFFER: { unsigned char *ver_addr; - uint8_t *pQbuffer, *ptmpQbuffer; + uint8_t *ptmpQbuffer; uint32_t allxfer_len = 0; - ver_addr = kmalloc(1032, GFP_ATOMIC); + ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC); if (!ver_addr) { retvalue = ARCMSR_MESSAGE_FAIL; pr_info("%s: memory not enough!\n", __func__); @@ -2266,66 +2266,22 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, } ptmpQbuffer = ver_addr; spin_lock_irqsave(&acb->rqbuffer_lock, flags); - if (acb->rqbuf_firstindex != acb->rqbuf_lastindex) { - pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; - if (acb->rqbuf_firstindex > acb->rqbuf_lastindex) { - if ((ARCMSR_MAX_QBUFFER - - acb->rqbuf_firstindex) >= 1032) { - memcpy(ptmpQbuffer, pQbuffer, 1032); - acb->rqbuf_firstindex += 1032; - acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; - allxfer_len = 1032; - } else { - if (((ARCMSR_MAX_QBUFFER - - acb->rqbuf_firstindex) + - acb->rqbuf_lastindex) > 1032) { - memcpy(ptmpQbuffer, - pQbuffer, ARCMSR_MAX_QBUFFER - - acb->rqbuf_firstindex); - ptmpQbuffer += - ARCMSR_MAX_QBUFFER - - acb->rqbuf_firstindex; - memcpy(ptmpQbuffer, - acb->rqbuffer, 1032 - - (ARCMSR_MAX_QBUFFER - - acb->rqbuf_firstindex)); - acb->rqbuf_firstindex = - 1032 - (ARCMSR_MAX_QBUFFER - - acb->rqbuf_firstindex); - allxfer_len = 1032; - } else { - memcpy(ptmpQbuffer, - pQbuffer, ARCMSR_MAX_QBUFFER - - acb->rqbuf_firstindex); - ptmpQbuffer += - ARCMSR_MAX_QBUFFER - - acb->rqbuf_firstindex; - memcpy(ptmpQbuffer, - acb->rqbuffer, - acb->rqbuf_lastindex); - allxfer_len = ARCMSR_MAX_QBUFFER - - acb->rqbuf_firstindex + - acb->rqbuf_lastindex; - acb->rqbuf_firstindex = - acb->rqbuf_lastindex; - } - } - } else { - if ((acb->rqbuf_lastindex - - acb->rqbuf_firstindex) > 1032) { - memcpy(ptmpQbuffer, pQbuffer, 1032); - acb->rqbuf_firstindex += 1032; - allxfer_len = 1032; - } else { - memcpy(ptmpQbuffer, pQbuffer, - acb->rqbuf_lastindex - - acb->rqbuf_firstindex); - allxfer_len = acb->rqbuf_lastindex - - acb->rqbuf_firstindex; - acb->rqbuf_firstindex = - acb->rqbuf_lastindex; - } + if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) { + unsigned int tail = acb->rqbuf_getIndex; + unsigned int head = acb->rqbuf_putIndex; + unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER); + + allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER); + if (allxfer_len > ARCMSR_API_DATA_BUFLEN) + allxfer_len = ARCMSR_API_DATA_BUFLEN; + + if (allxfer_len <= cnt_to_end) + memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len); + else { + memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end); + memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end); } + acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER; } memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len); @@ -2349,9 +2305,9 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, } case ARCMSR_MESSAGE_WRITE_WQBUFFER: { unsigned char *ver_addr; - int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; + int32_t user_len, cnt2end; uint8_t 
*pQbuffer, *ptmpuserbuffer; - ver_addr = kmalloc(1032, GFP_ATOMIC); + ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC); if (!ver_addr) { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; @@ -2361,9 +2317,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); spin_lock_irqsave(&acb->wqbuffer_lock, flags); - wqbuf_lastindex = acb->wqbuf_lastindex; - wqbuf_firstindex = acb->wqbuf_firstindex; - if (wqbuf_lastindex != wqbuf_firstindex) { + if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) { struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)cmd->sense_buffer; arcmsr_write_ioctldata2iop(acb); @@ -2375,48 +2329,22 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, sensebuffer->Valid = 1; retvalue = ARCMSR_MESSAGE_FAIL; } else { - my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1) - & (ARCMSR_MAX_QBUFFER - 1); - if (my_empty_len >= user_len) { - while (user_len > 0) { - pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; - if ((acb->wqbuf_lastindex + user_len) - > ARCMSR_MAX_QBUFFER) { - memcpy(pQbuffer, ptmpuserbuffer, - ARCMSR_MAX_QBUFFER - - acb->wqbuf_lastindex); - ptmpuserbuffer += - (ARCMSR_MAX_QBUFFER - - acb->wqbuf_lastindex); - user_len -= (ARCMSR_MAX_QBUFFER - - acb->wqbuf_lastindex); - acb->wqbuf_lastindex = 0; - } else { - memcpy(pQbuffer, ptmpuserbuffer, - user_len); - acb->wqbuf_lastindex += user_len; - acb->wqbuf_lastindex %= - ARCMSR_MAX_QBUFFER; - user_len = 0; - } - } - if (acb->acb_flags & - ACB_F_MESSAGE_WQBUFFER_CLEARED) { - acb->acb_flags &= + pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex]; + cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex; + if (user_len > cnt2end) { + memcpy(pQbuffer, ptmpuserbuffer, cnt2end); + ptmpuserbuffer += cnt2end; + user_len -= cnt2end; + acb->wqbuf_putIndex = 0; + pQbuffer = acb->wqbuffer; + } + memcpy(pQbuffer, ptmpuserbuffer, user_len); + acb->wqbuf_putIndex += user_len; + acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER; + if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { + acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED; - arcmsr_write_ioctldata2iop(acb); - } - } else { - struct SENSE_DATA *sensebuffer = - (struct SENSE_DATA *)cmd->sense_buffer; - /* has error report sensedata */ - sensebuffer->ErrorCode = - SCSI_SENSE_CURRENT_ERRORS; - sensebuffer->SenseKey = ILLEGAL_REQUEST; - sensebuffer->AdditionalSenseLength = 0x0A; - sensebuffer->AdditionalSenseCode = 0x20; - sensebuffer->Valid = 1; - retvalue = ARCMSR_MESSAGE_FAIL; + arcmsr_write_ioctldata2iop(acb); } } spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); @@ -2435,8 +2363,8 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, arcmsr_clear_iop2drv_rqueue_buffer(acb); spin_lock_irqsave(&acb->rqbuffer_lock, flags); acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; - acb->rqbuf_firstindex = 0; - acb->rqbuf_lastindex = 0; + acb->rqbuf_getIndex = 0; + acb->rqbuf_putIndex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); if (acb->fw_flag == FW_DEADLOCK) @@ -2452,8 +2380,8 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, spin_lock_irqsave(&acb->wqbuffer_lock, flags); acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READED); - acb->wqbuf_firstindex = 0; - acb->wqbuf_lastindex = 0; + acb->wqbuf_getIndex = 0; + acb->wqbuf_putIndex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); if (acb->fw_flag == FW_DEADLOCK) @@ -2469,16 
+2397,16 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, arcmsr_clear_iop2drv_rqueue_buffer(acb); spin_lock_irqsave(&acb->rqbuffer_lock, flags); acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; - acb->rqbuf_firstindex = 0; - acb->rqbuf_lastindex = 0; + acb->rqbuf_getIndex = 0; + acb->rqbuf_putIndex = 0; pQbuffer = acb->rqbuffer; memset(pQbuffer, 0, sizeof(struct QBUFFER)); spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); spin_lock_irqsave(&acb->wqbuffer_lock, flags); acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READED); - acb->wqbuf_firstindex = 0; - acb->wqbuf_lastindex = 0; + acb->wqbuf_getIndex = 0; + acb->wqbuf_putIndex = 0; pQbuffer = acb->wqbuffer; memset(pQbuffer, 0, sizeof(struct QBUFFER)); spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); -- cgit From 859c75aba20264d87dd026bab0d0ca3bff385955 Mon Sep 17 00:00:00 2001 From: Tomas Henzl Date: Fri, 12 Sep 2014 14:44:15 +0200 Subject: hpsa: add missing pci_set_master in kdump path Add a call to pci_set_master(...) missing in the previous patch "hpsa: refine the pci enable/disable handling". Found thanks to Rob Elliot. Signed-off-by: Tomas Henzl Reviewed-by: Robert Elliott Tested-by: Robert Elliott Signed-off-by: Christoph Hellwig --- drivers/scsi/hpsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 7828834e212b..cef5d49b59cd 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -6544,7 +6544,7 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev) dev_warn(&pdev->dev, "failed to enable device.\n"); return -ENODEV; } - + pci_set_master(pdev); /* Reset the controller with a PCI power-cycle or via doorbell */ rc = hpsa_kdump_hard_reset_controller(pdev); -- cgit From 4089b71cc820a426d601283c92fcd4ffeb5139c2 Mon Sep 17 00:00:00 2001 From: Chris J Arges Date: Tue, 23 Sep 2014 09:22:25 -0500 Subject: mptfusion: enable no_write_same for vmware scsi disks When using a virtual SCSI disk in a VMWare VM if blkdev_issue_zeroout is used data can be improperly zeroed out using the mptfusion driver. This patch disables write_same for this driver and the vmware subsystem_vendor which ensures that manual zeroing out is used instead. Cc: stable@vger.kernel.org BugLink: http://bugs.launchpad.net/bugs/1371591 Reported-by: Bruce Lucas Tested-by: Chris J Arges Signed-off-by: Chris J Arges Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/message/fusion/mptspi.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index 787933d43d32..613231c16194 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c @@ -1419,6 +1419,11 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out_mptspi_probe; } + /* VMWare emulation doesn't properly implement WRITE_SAME + */ + if (pdev->subsystem_vendor == 0x15AD) + sh->no_write_same = 1; + spin_lock_irqsave(&ioc->FreeQlock, flags); /* Attach the SCSI Host to the IOC structure -- cgit From 299f5e27ac5fac42f5be38a1cdf004b4e8217cbf Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Thu, 25 Sep 2014 05:16:32 -0400 Subject: qla2xxx: ISP27xx add tests for incomplete template. 
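For reference on the arcmsr ioctl rework earlier in this series: the hand-rolled wrap-around index arithmetic is replaced by the CIRC_CNT()/CIRC_CNT_TO_END() helpers, so draining the read ring becomes at most two memcpy() calls, one up to the end of the buffer and one from its start. A minimal stand-alone sketch of that pattern follows; the buffer size and names are illustrative, and the two macros mirror the definitions in include/linux/circ_buf.h, which assume a power-of-two ring size (ARCMSR_MAX_QBUFFER is 4096, so that holds in the driver).

    #include <string.h>

    #define QBUF_SIZE 4096  /* must be a power of two for the masking below */

    /* same shape as the helpers in include/linux/circ_buf.h */
    #define CIRC_CNT(head, tail, size)  (((head) - (tail)) & ((size) - 1))
    #define CIRC_CNT_TO_END(head, tail, size) \
        ({ int end = (size) - (tail); \
           int n = ((head) + end) & ((size) - 1); \
           n < end ? n : end; })

    struct qbuf {
        unsigned char data[QBUF_SIZE];
        unsigned int put;   /* producer index */
        unsigned int get;   /* consumer index */
    };

    /* Copy up to max bytes out of the ring; returns the number copied. */
    static unsigned int qbuf_read(struct qbuf *q, unsigned char *dst, unsigned int max)
    {
        unsigned int head = q->put, tail = q->get;
        unsigned int avail = CIRC_CNT(head, tail, QBUF_SIZE);
        unsigned int to_end = CIRC_CNT_TO_END(head, tail, QBUF_SIZE);
        unsigned int len = avail < max ? avail : max;

        if (len <= to_end) {
            memcpy(dst, q->data + tail, len);       /* contiguous case */
        } else {
            memcpy(dst, q->data + tail, to_end);    /* up to the wrap point */
            memcpy(dst + to_end, q->data, len - to_end);
        }
        q->get = (q->get + len) % QBUF_SIZE;
        return len;
    }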
Signed-off-by: Joe Carnuccio Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_dbg.c | 2 +- drivers/scsi/qla2xxx/qla_tmpl.c | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 41117b3a50f0..55edaa2a0f3f 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -64,7 +64,7 @@ * | | | 0xb13c-0xb140 | * | | | 0xb149 | * | MultiQ | 0xc00c | | - * | Misc | 0xd212 | 0xd017-0xd019 | + * | Misc | 0xd212 | 0xd017 | * | | | 0xd020 | * | | | 0xd030-0xd0ff | * | | | 0xd101-0xd1fe | diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index cb9a0c4bc419..252de5d7bff6 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -792,6 +792,15 @@ qla27xx_walk_template(struct scsi_qla_host *vha, break; ent = qla27xx_next_entry(ent); } + + if (count) + ql_dbg(ql_dbg_misc, vha, 0xd018, + "%s: residual count (%lx)\n", __func__, count); + + if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END) + ql_dbg(ql_dbg_misc, vha, 0xd019, + "%s: missing end (%lx)\n", __func__, count); + ql_dbg(ql_dbg_misc, vha, 0xd01b, "%s: len=%lx\n", __func__, *len); } -- cgit From aa2dc3727a934e63a74e6dd5f017d263d9baa70a Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Thu, 25 Sep 2014 05:16:33 -0400 Subject: qla2xxx: ISP27xx optimize fwdump entry table lookup. Since the entry call array is sorted in order of entry type opcode, the search can be terminated as soon as the search key is exceeded. Signed-off-by: Joe Carnuccio Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_tmpl.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 252de5d7bff6..29271cc7659a 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -726,7 +726,7 @@ qla27xx_fwdt_entry_other(struct scsi_qla_host *vha, } struct qla27xx_fwdt_entry_call { - int type; + uint type; int (*call)( struct scsi_qla_host *, struct qla27xx_fwdt_entry *, @@ -759,15 +759,17 @@ static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = { { -1 , qla27xx_fwdt_entry_other } }; -static inline int (*qla27xx_find_entry(int type)) +static inline int (*qla27xx_find_entry(uint type)) (struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *) { struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list; - while (list->type != -1 && list->type != type) + while (list->type < type) list++; - return list->call; + if (list->type == type) + return list->call; + return qla27xx_fwdt_entry_other; } static inline void * -- cgit From 01cb65f1bb3e71df1a0eb3393d3066ea0161a425 Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Thu, 25 Sep 2014 05:16:34 -0400 Subject: qla2xxx: ISP27xx fwdump template remove high frequency debug logs. 
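One detail worth spelling out for the qla27xx_find_entry() change in the lookup-optimization patch above: the type field becomes unsigned, so the sentinel's -1 is stored as UINT_MAX and the sorted scan is guaranteed to stop at the last row; an unknown opcode then falls through to the catch-all handler. A reduced sketch of the same sentinel-terminated sorted dispatch follows; the table contents and handler names are made up for illustration.

    #include <limits.h>

    typedef int (*handler_t)(void);

    static int handle_ram_dump(void)      { return 1; }
    static int handle_shadow_queues(void) { return 2; }
    static int handle_other(void)         { return 0; }  /* catch-all */

    struct entry_call {
        unsigned int type;   /* sorted ascending */
        handler_t call;
    };

    static const struct entry_call dispatch[] = {
        { 262,      handle_ram_dump },
        { 274,      handle_shadow_queues },
        { UINT_MAX, handle_other },   /* the driver writes this row as -1 */
    };

    static handler_t find_entry(unsigned int type)
    {
        const struct entry_call *list = dispatch;

        while (list->type < type)   /* the UINT_MAX sentinel always stops the scan */
            list++;
        return list->type == type ? list->call : handle_other;
    }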
Signed-off-by: Joe Carnuccio Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_dbg.c | 2 +- drivers/scsi/qla2xxx/qla_tmpl.c | 17 ----------------- 2 files changed, 1 insertion(+), 18 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 55edaa2a0f3f..61bd49f4f7bf 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -64,7 +64,7 @@ * | | | 0xb13c-0xb140 | * | | | 0xb149 | * | MultiQ | 0xc00c | | - * | Misc | 0xd212 | 0xd017 | + * | Misc | 0xd212 | 0xd011-0xd017 | * | | | 0xd020 | * | | | 0xd030-0xd0ff | * | | | 0xd101-0xd1fe | diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 29271cc7659a..6da9dd272eb3 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -151,8 +151,6 @@ qla27xx_read8(void *window, void *buf, ulong *len) if (buf) { value = RD_REG_BYTE((__iomem void *)window); - ql_dbg(ql_dbg_misc, NULL, 0xd011, - "%s: -> %x\n", __func__, value); } qla27xx_insert32(value, buf, len); } @@ -164,8 +162,6 @@ qla27xx_read16(void *window, void *buf, ulong *len) if (buf) { value = RD_REG_WORD((__iomem void *)window); - ql_dbg(ql_dbg_misc, NULL, 0xd012, - "%s: -> %x\n", __func__, value); } qla27xx_insert32(value, buf, len); } @@ -177,8 +173,6 @@ qla27xx_read32(void *window, void *buf, ulong *len) if (buf) { value = RD_REG_DWORD((__iomem void *)window); - ql_dbg(ql_dbg_misc, NULL, 0xd013, - "%s: -> %x\n", __func__, value); } qla27xx_insert32(value, buf, len); } @@ -197,10 +191,6 @@ qla27xx_read_reg(__iomem struct device_reg_24xx *reg, { void *window = (void *)reg + offset; - if (buf) { - ql_dbg(ql_dbg_misc, NULL, 0xd014, - "%s: @%x\n", __func__, offset); - } qla27xx_read32(window, buf, len); } @@ -211,8 +201,6 @@ qla27xx_write_reg(__iomem struct device_reg_24xx *reg, __iomem void *window = reg + offset; if (buf) { - ql_dbg(ql_dbg_misc, NULL, 0xd015, - "%s: @%x <- %x\n", __func__, offset, data); WRT_REG_DWORD(window, data); } } @@ -225,11 +213,6 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg, void *window = (void *)reg + offset; void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width); - if (buf) { - ql_dbg(ql_dbg_misc, NULL, 0xd016, - "%s: base=%x offset=%x count=%x width=%x\n", - __func__, addr, offset, count, width); - } qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf); while (count--) { qla27xx_insert32(addr, buf, len); -- cgit From ce9b9b0858c6806ae84072d5fabb5ae0ca3f2799 Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Thu, 25 Sep 2014 05:16:35 -0400 Subject: qla2xxx: ISP27xx fwdump template fix insertbuf() routine. 
Signed-off-by: Joe Carnuccio Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_tmpl.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 6da9dd272eb3..6f2f7b29607d 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -128,18 +128,10 @@ qla27xx_insert32(uint32_t value, void *buf, ulong *len) static inline void qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len) { - ulong cnt = size; - if (buf && mem) { + if (buf && mem && size) { buf += *len; - while (cnt >= sizeof(uint32_t)) { - *(__le32 *)buf = cpu_to_le32p(mem); - buf += sizeof(uint32_t); - mem += sizeof(uint32_t); - cnt -= sizeof(uint32_t); - } - if (cnt) - memcpy(buf, mem, cnt); + memcpy(buf, mem, size); } *len += size; } -- cgit From 2ac224bc0e9021520be95f6678c94440d526aa49 Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Thu, 25 Sep 2014 05:16:36 -0400 Subject: qla2xxx: Add ISP27xx fwdump template entry T275 (insert buffer). Signed-off-by: Joe Carnuccio Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_dbg.c | 7 +++---- drivers/scsi/qla2xxx/qla_tmpl.c | 27 +++++++++++++++++++++++++++ drivers/scsi/qla2xxx/qla_tmpl.h | 6 ++++++ 3 files changed, 36 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 61bd49f4f7bf..fff8769f0b33 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -64,11 +64,10 @@ * | | | 0xb13c-0xb140 | * | | | 0xb149 | * | MultiQ | 0xc00c | | - * | Misc | 0xd212 | 0xd011-0xd017 | - * | | | 0xd020 | - * | | | 0xd030-0xd0ff | + * | Misc | 0xd213 | 0xd011-0xd017 | + * | | | 0xd031-0xd0ff | * | | | 0xd101-0xd1fe | - * | | | 0xd213-0xd2fe | + * | | | 0xd214-0xd2fe | * | Target Mode | 0xe078 | | * | Target Mode Management | 0xf072 | 0xf002-0xf003 | * | | | 0xf046-0xf049 | diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 6f2f7b29607d..d92ee068e802 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -689,6 +689,32 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, return false; } +static int +qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ulong offset = offsetof(typeof(*ent), t275.buffer); + + ql_dbg(ql_dbg_misc, vha, 0xd213, + "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len); + if (!ent->t275.length) { + ql_dbg(ql_dbg_misc, vha, 0xd020, + "%s: buffer zero length\n", __func__); + qla27xx_skip_entry(ent, buf); + goto done; + } + if (offset + ent->t275.length > ent->hdr.entry_size) { + ql_dbg(ql_dbg_misc, vha, 0xd030, + "%s: buffer overflow\n", __func__); + qla27xx_skip_entry(ent, buf); + goto done; + } + + qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len); +done: + return false; +} + static int qla27xx_fwdt_entry_other(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) @@ -731,6 +757,7 @@ static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = { { ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } , { ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } , { ENTRY_TYPE_GET_SHADOW , qla27xx_fwdt_entry_t274 } , + { ENTRY_TYPE_WRITE_BUF , qla27xx_fwdt_entry_t275 } , { -1 , qla27xx_fwdt_entry_other } }; diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h index 1967424c8e64..f19856bb873f 100644 
--- a/drivers/scsi/qla2xxx/qla_tmpl.h +++ b/drivers/scsi/qla2xxx/qla_tmpl.h @@ -53,6 +53,7 @@ struct __packed qla27xx_fwdt_template { #define ENTRY_TYPE_RDREMRAM 272 #define ENTRY_TYPE_PCICFG 273 #define ENTRY_TYPE_GET_SHADOW 274 +#define ENTRY_TYPE_WRITE_BUF 275 #define CAPTURE_FLAG_PHYS_ONLY BIT_0 #define CAPTURE_FLAG_PHYS_VIRT BIT_1 @@ -193,6 +194,11 @@ struct __packed qla27xx_fwdt_entry { uint8_t queue_type; uint8_t reserved[3]; } t274; + + struct __packed { + uint32_t length; + uint8_t buffer[]; + } t275; }; }; -- cgit From 420854b3cda9e7e0ff1ace8dba3ad45387d15d14 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Thu, 25 Sep 2014 05:16:37 -0400 Subject: qla2xxx: Enable fast flash access for ISP83xx. Signed-off-by: Chad Dupuis Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_sup.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index bca173e56f16..04b370182742 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -2580,7 +2580,8 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, uint32_t faddr, left, burst; struct qla_hw_data *ha = vha->hw; - if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA27XX(ha)) + if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || + IS_QLA27XX(ha)) goto try_fast; if (offset & 0xfff) goto slow_read; -- cgit From 98aee70d19a7e3203649fa2078464e4f402a0ad8 Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Thu, 25 Sep 2014 05:16:38 -0400 Subject: qla2xxx: Add endianizer to max_payload_size modifier. Signed-off-by: Joe Carnuccio Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_fw.h | 2 +- drivers/scsi/qla2xxx/qla_init.c | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index eb8f57249f1d..c7d1c4569fa1 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -91,7 +91,7 @@ struct nvram_24xx { /* Firmware Initialization Control Block. */ uint16_t version; uint16_t reserved_1; - uint16_t frame_payload_size; + __le16 frame_payload_size; uint16_t execution_throttle; uint16_t exchange_count; uint16_t hard_address; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 46990f4ceb40..748bd9093680 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2596,18 +2596,18 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) nv->firmware_options[1] = BIT_7 | BIT_5; nv->add_firmware_options[0] = BIT_5; nv->add_firmware_options[1] = BIT_5 | BIT_4; - nv->frame_payload_size = __constant_cpu_to_le16(2048); + nv->frame_payload_size = 2048; nv->special_options[1] = BIT_7; } else if (IS_QLA2200(ha)) { nv->firmware_options[0] = BIT_2 | BIT_1; nv->firmware_options[1] = BIT_7 | BIT_5; nv->add_firmware_options[0] = BIT_5; nv->add_firmware_options[1] = BIT_5 | BIT_4; - nv->frame_payload_size = __constant_cpu_to_le16(1024); + nv->frame_payload_size = 1024; } else if (IS_QLA2100(ha)) { nv->firmware_options[0] = BIT_3 | BIT_1; nv->firmware_options[1] = BIT_5; - nv->frame_payload_size = __constant_cpu_to_le16(1024); + nv->frame_payload_size = 1024; } nv->max_iocb_allocation = __constant_cpu_to_le16(256); @@ -2643,7 +2643,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) * are valid. 
*/ if (ia64_platform_is("sn2")) { - nv->frame_payload_size = __constant_cpu_to_le16(2048); + nv->frame_payload_size = 2048; if (IS_QLA23XX(ha)) nv->special_options[1] = BIT_7; } @@ -4958,7 +4958,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) memset(nv, 0, ha->nvram_size); nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION); nv->version = __constant_cpu_to_le16(ICB_VERSION); - nv->frame_payload_size = __constant_cpu_to_le16(2048); + nv->frame_payload_size = 2048; nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); nv->exchange_count = __constant_cpu_to_le16(0); nv->hard_address = __constant_cpu_to_le16(124); @@ -5905,7 +5905,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) memset(nv, 0, ha->nvram_size); nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION); nv->version = __constant_cpu_to_le16(ICB_VERSION); - nv->frame_payload_size = __constant_cpu_to_le16(2048); + nv->frame_payload_size = 2048; nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); nv->exchange_count = __constant_cpu_to_le16(0); nv->port_name[0] = 0x21; -- cgit From 4096953054ad4369774f9dadc125ef7a57bb9ed0 Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Thu, 25 Sep 2014 05:16:39 -0400 Subject: qla2xxx: ISP25xx multiqueue shadow register crash fix. When creating request/response queues from qla25xx_setup_mode(), the shadow index register pointers were not being initialized to point at the registers. Signed-off-by: Joe Carnuccio Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_mid.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 89998244f48d..5c2e0317f1c0 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -702,6 +702,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, req->req_q_in = ®->isp25mq.req_q_in; req->req_q_out = ®->isp25mq.req_q_out; req->max_q_depth = ha->req_q_map[0]->max_q_depth; + req->out_ptr = (void *)(req->ring + req->length); mutex_unlock(&ha->vport_lock); ql_dbg(ql_dbg_multiq, base_vha, 0xc004, "ring_ptr=%p ring_index=%d, " @@ -811,6 +812,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, reg = ISP_QUE_REG(ha, que_id); rsp->rsp_q_in = ®->isp25mq.rsp_q_in; rsp->rsp_q_out = ®->isp25mq.rsp_q_out; + rsp->in_ptr = (void *)(rsp->ring + rsp->length); mutex_unlock(&ha->vport_lock); ql_dbg(ql_dbg_multiq, base_vha, 0xc00b, "options=%x id=%d rsp_q_in=%p rsp_q_out=%p", -- cgit From 349c390f43c63c2c919b81beb84b3a7592f2dd14 Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Thu, 25 Sep 2014 05:16:40 -0400 Subject: qla2xxx: ISP27xx fwdump template error print simplification. 
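Some context for the ISP25xx multiqueue shadow-register fix above: with shadow registers the firmware DMA-writes the current queue index into host memory placed immediately after the ring entries, and the driver reads it from there instead of from a register, which is why leaving out_ptr/in_ptr unset for queues created at runtime led to the crash fixed here. The layout that out_ptr = (void *)(req->ring + req->length) relies on is sketched below; the structure, entry size and plain calloc() are illustrative stand-ins for the driver's DMA allocation.

    #include <stdint.h>
    #include <stdlib.h>

    struct request_entry {
        uint8_t data[64];   /* one IOCB; size is illustrative */
    };

    struct req_que {
        struct request_entry *ring;
        uint16_t length;    /* number of entries */
        uint32_t *out_ptr;  /* shadow consumer index, written by the controller */
    };

    static int req_que_alloc(struct req_que *req, uint16_t length)
    {
        /* one extra dword after the last entry holds the shadow index */
        void *mem = calloc(1, length * sizeof(*req->ring) + sizeof(uint32_t));

        if (!mem)
            return -1;
        req->ring = mem;
        req->length = length;
        /* pointer arithmetic lands just past the last ring entry */
        req->out_ptr = (uint32_t *)(req->ring + req->length);
        return 0;
    }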
Signed-off-by: Joe Carnuccio Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_dbg.c | 3 +++ drivers/scsi/qla2xxx/qla_tmpl.c | 31 ++++--------------------------- drivers/scsi/qla2xxx/qla_tmpl.h | 2 ++ 3 files changed, 9 insertions(+), 27 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index fff8769f0b33..d60c1c6af6c7 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -65,6 +65,9 @@ * | | | 0xb149 | * | MultiQ | 0xc00c | | * | Misc | 0xd213 | 0xd011-0xd017 | + * | | | 0xd021,0xd024 | + * | | | 0xd025,0xd029 | + * | | | 0xd02a,0xd02e | * | | | 0xd031-0xd0ff | * | | | 0xd101-0xd1fe | * | | | 0xd214-0xd2fe | diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index d92ee068e802..a8c0c7362e48 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -355,14 +355,9 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, ent->t262.start_addr = start; ent->t262.end_addr = end; } - } else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) { - ql_dbg(ql_dbg_misc, vha, 0xd021, - "%s: unsupported ddr ram\n", __func__); - qla27xx_skip_entry(ent, buf); - goto done; } else { ql_dbg(ql_dbg_misc, vha, 0xd022, - "%s: unknown area %u\n", __func__, ent->t262.ram_area); + "%s: unknown area %x\n", __func__, ent->t262.ram_area); qla27xx_skip_entry(ent, buf); goto done; } @@ -377,8 +372,6 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, dwords = end - start + 1; if (buf) { - ql_dbg(ql_dbg_misc, vha, 0xd024, - "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords); buf += *len; qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf); } @@ -423,13 +416,9 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, count++; } } - } else if (ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) { - ql_dbg(ql_dbg_misc, vha, 0xd025, - "%s: unsupported atio queue\n", __func__); - qla27xx_skip_entry(ent, buf); } else { ql_dbg(ql_dbg_misc, vha, 0xd026, - "%s: unknown queue %u\n", __func__, ent->t263.queue_type); + "%s: unknown queue %x\n", __func__, ent->t263.queue_type); qla27xx_skip_entry(ent, buf); } @@ -524,17 +513,9 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha, "%s: missing eft\n", __func__); qla27xx_skip_entry(ent, buf); } - } else if (ent->t268.buf_type == T268_BUF_TYPE_EXCH_BUFOFF) { - ql_dbg(ql_dbg_misc, vha, 0xd029, - "%s: unsupported exchange offload buffer\n", __func__); - qla27xx_skip_entry(ent, buf); - } else if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_LOGIN) { - ql_dbg(ql_dbg_misc, vha, 0xd02a, - "%s: unsupported extended login buffer\n", __func__); - qla27xx_skip_entry(ent, buf); } else { ql_dbg(ql_dbg_misc, vha, 0xd02b, - "%s: unknown buf %x\n", __func__, ent->t268.buf_type); + "%s: unknown buffer %x\n", __func__, ent->t268.buf_type); qla27xx_skip_entry(ent, buf); } @@ -670,13 +651,9 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, count++; } } - } else if (ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) { - ql_dbg(ql_dbg_misc, vha, 0xd02e, - "%s: unsupported atio queue\n", __func__); - qla27xx_skip_entry(ent, buf); } else { ql_dbg(ql_dbg_misc, vha, 0xd02f, - "%s: unknown queue %u\n", __func__, ent->t274.queue_type); + "%s: unknown queue %x\n", __func__, ent->t274.queue_type); qla27xx_skip_entry(ent, buf); } diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h index f19856bb873f..141c1c5e73f4 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.h +++ b/drivers/scsi/qla2xxx/qla_tmpl.h @@ -214,6 +214,8 @@ struct __packed 
qla27xx_fwdt_entry { #define T268_BUF_TYPE_EXTD_TRACE 1 #define T268_BUF_TYPE_EXCH_BUFOFF 2 #define T268_BUF_TYPE_EXTD_LOGIN 3 +#define T268_BUF_TYPE_REQ_MIRROR 4 +#define T268_BUF_TYPE_RSP_MIRROR 5 #define T274_QUEUE_TYPE_REQ_SHAD 1 #define T274_QUEUE_TYPE_RSP_SHAD 2 -- cgit From efcdf9f5c8dfb1fa2b052e58e4ccb85541aee2cd Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Thu, 25 Sep 2014 05:16:41 -0400 Subject: qla2xxx: Fix potential return count bug in qla2xxx_get_vpd_field(). Call scnprintf() instead of snprintf() since the latter may return an incorrect count in cases where the write is truncated to fit. scnprintf() returns the count of what was actually written; snprintf() returns the count of what would have been written. Signed-off-by: Joe Carnuccio Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_sup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 04b370182742..4788ecd0c42a 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -3163,7 +3163,7 @@ qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size) } if (pos < end - len && *pos != 0x78) - return snprintf(str, size, "%.*s", len, pos + 3); + return scnprintf(str, size, "%.*s", len, pos + 3); return 0; } -- cgit From 4bb2efc45ffb7e5f249e97f86a387f6f3a7daae3 Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Thu, 25 Sep 2014 05:16:42 -0400 Subject: qla2xxx: ISPFX00 avoid writing semaphore register in request_irqs(). Semaphore register does not exist for ISPFx00. Signed-off-by: Joe Carnuccio Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_isr.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index a0992bfab7bf..223c1a89af5c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -3098,10 +3098,11 @@ skip_msi: } clear_risc_ints: + if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) + goto fail; spin_lock_irq(&ha->hardware_lock); - if (!IS_FWI2_CAPABLE(ha)) - WRT_REG_WORD(®->isp.semaphore, 0); + WRT_REG_WORD(®->isp.semaphore, 0); spin_unlock_irq(&ha->hardware_lock); fail: -- cgit From 7b711623e1924b4ca1b06610f694c71b0feb05f9 Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Thu, 25 Sep 2014 05:16:43 -0400 Subject: qla2xxx: Incorrect debug level on mailbox command print 0x1111. Signed-off-by: Joe Carnuccio Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_mbx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index a7a373f38ca8..ca7f4f012511 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -117,7 +117,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) command = mcp->mb[0]; mboxes = mcp->out_mb; - ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111, + ql_dbg(ql_dbg_mbx, vha, 0x1111, "Mailbox registers (OUT):\n"); for (cnt = 0; cnt < ha->mbx_count; cnt++) { if (IS_QLA2200(ha) && cnt == 8) -- cgit From 24a42d50efdbb279f26c179e980e3420532fdf2a Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Thu, 25 Sep 2014 05:16:44 -0400 Subject: qla2xxx: Incorrect linked list semantic in qlafx00_get_fcport(). 
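To make the snprintf()/scnprintf() distinction behind the qla2xxx_get_vpd_field() fix above concrete: on truncation snprintf() returns the length the full output would have needed, which can exceed the buffer, while the kernel's scnprintf() returns the number of characters actually stored, which is what callers treat as the amount of data produced. A small user-space demonstration follows; the clamped value stands in for scnprintf(), which exists only in the kernel.

    #include <stdio.h>

    int main(void)
    {
        char buf[8];

        /* snprintf() reports the untruncated length: 26 here, not 7 */
        int would_need = snprintf(buf, sizeof(buf), "%s",
                                  "abcdefghijklmnopqrstuvwxyz");

        /* scnprintf()-style result: what actually landed in buf */
        int written = would_need >= (int)sizeof(buf) ?
                      (int)sizeof(buf) - 1 : would_need;

        printf("would_need=%d written=%d buf=\"%s\"\n", would_need, written, buf);
        return 0;
    }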
Signed-off-by: Joe Carnuccio Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_mr.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 8ecf6decea67..620e60a7a0ee 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -1675,17 +1675,16 @@ qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id) fc_port_t *fcport; /* Check for matching device in remote port list. */ - fcport = NULL; list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->tgt_id == tgt_id) { ql_dbg(ql_dbg_async, vha, 0x5072, "Matching fcport(%p) found with TGT-ID: 0x%x " "and Remote TGT_ID: 0x%x\n", fcport, fcport->tgt_id, tgt_id); - break; + return fcport; } } - return fcport; + return NULL; } static void -- cgit From e475f9c0a5a12dfc79647f5a9b222f20db5f61a5 Mon Sep 17 00:00:00 2001 From: Jan Kulich Date: Thu, 25 Sep 2014 05:16:45 -0400 Subject: qla2xxx: ISPFx00 unexpected resets during adapter boot sequence. Signed-off-by: Jan Kulich Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_mr.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 620e60a7a0ee..c54a53de351b 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -1551,7 +1551,10 @@ qlafx00_timer_routine(scsi_qla_host_t *vha) ha->mr.fw_reset_timer_tick = QLAFX00_MAX_RESET_INTERVAL; } - ha->mr.old_aenmbx0_state = aenmbx0; + if (ha->mr.old_aenmbx0_state != aenmbx0) { + ha->mr.old_aenmbx0_state = aenmbx0; + ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; + } ha->mr.fw_reset_timer_tick--; } if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) { -- cgit From df57cabac41fd54e8eb73f9de482f054eb0b98eb Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Thu, 25 Sep 2014 05:16:46 -0400 Subject: qla2xxx: Add FDMI-2 functionality. Add support for the FDMI-2 fabric switch feature. Since FDMI-2 uses code from FDMI-1, some of the existing code needed to be repaired to prevent fields from being overflowed. 
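Concretely, the repaired attribute builders bound every string copy with snprintf(..., sizeof(field), ...) instead of strcpy()/sprintf(), and round each attribute length up with alen += 4 - (alen & 3), since attribute entries in the CT payload are padded to 4-byte boundaries (see the qla_gs.c hunks below). A reduced sketch of filling one such string attribute follows; the structure is a simplified stand-in, not the driver's ct_fdmi_hba_attr, and htons() stands in for cpu_to_be16().

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct fdmi_attr {
        uint16_t type;      /* big-endian on the wire */
        uint16_t len;       /* big-endian: 4-byte header + padded value */
        char     value[64];
    };

    /* Fill one attribute; returns the bytes it occupies in the CT payload. */
    static size_t fdmi_put_string(struct fdmi_attr *attr, uint16_t type, const char *s)
    {
        size_t alen;

        /* bounded copy: cannot overflow value[], unlike strcpy()/sprintf() */
        snprintf(attr->value, sizeof(attr->value), "%s", s);

        alen = strlen(attr->value);
        /* pad to the next 4-byte boundary; adds 4 when already aligned,
         * matching the driver's arithmetic */
        alen += 4 - (alen & 3);

        attr->type = htons(type);
        attr->len = htons(4 + alen);
        return 4 + alen;
    }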
Signed-off-by: Himanshu Madhani Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_attr.c | 5 +- drivers/scsi/qla2xxx/qla_dbg.c | 4 +- drivers/scsi/qla2xxx/qla_def.h | 154 +++++-- drivers/scsi/qla2xxx/qla_gbl.h | 4 +- drivers/scsi/qla2xxx/qla_gs.c | 943 +++++++++++++++++++++++++++++++++++----- drivers/scsi/qla2xxx/qla_mr.c | 4 +- drivers/scsi/qla2xxx/qla_os.c | 13 +- 7 files changed, 967 insertions(+), 160 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 16fe5196e6d9..868f4e5bd863 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1014,7 +1014,7 @@ qla2x00_fw_version_show(struct device *dev, char fw_str[128]; return scnprintf(buf, PAGE_SIZE, "%s\n", - ha->isp_ops->fw_version_str(vha, fw_str)); + ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str))); } static ssize_t @@ -1924,7 +1924,8 @@ qla2x00_get_host_symbolic_name(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); - qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost)); + qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost), + sizeof(fc_host_symbolic_name(shost))); } static void diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index d60c1c6af6c7..28442d082ba1 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -20,9 +20,9 @@ * | | | 0x1115-0x1116 | * | | | 0x111a-0x111b | * | | | 0x1155-0x1158 | - * | Device Discovery | 0x2095 | 0x2020-0x2022, | + * | Device Discovery | 0x2016 | 0x2020-0x2022, | * | | | 0x2011-0x2012, | - * | | | 0x2016 | + * | | | 0x2099-0x20a4 | * | Queue Command and IO tracing | 0x3059 | 0x3006-0x300b | * | | | 0x3027-0x3028 | * | | | 0x303d-0x3041 | diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index de5a9c471cf9..e5baead2369a 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2056,10 +2056,21 @@ static const char * const port_state_str[] = { #define CT_REJECT_RESPONSE 0x8001 #define CT_ACCEPT_RESPONSE 0x8002 -#define CT_REASON_INVALID_COMMAND_CODE 0x01 -#define CT_REASON_CANNOT_PERFORM 0x09 -#define CT_REASON_COMMAND_UNSUPPORTED 0x0b -#define CT_EXPL_ALREADY_REGISTERED 0x10 +#define CT_REASON_INVALID_COMMAND_CODE 0x01 +#define CT_REASON_CANNOT_PERFORM 0x09 +#define CT_REASON_COMMAND_UNSUPPORTED 0x0b +#define CT_EXPL_ALREADY_REGISTERED 0x10 +#define CT_EXPL_HBA_ATTR_NOT_REGISTERED 0x11 +#define CT_EXPL_MULTIPLE_HBA_ATTR 0x12 +#define CT_EXPL_INVALID_HBA_BLOCK_LENGTH 0x13 +#define CT_EXPL_MISSING_REQ_HBA_ATTR 0x14 +#define CT_EXPL_PORT_NOT_REGISTERED_ 0x15 +#define CT_EXPL_MISSING_HBA_ID_PORT_LIST 0x16 +#define CT_EXPL_HBA_NOT_REGISTERED 0x17 +#define CT_EXPL_PORT_ATTR_NOT_REGISTERED 0x20 +#define CT_EXPL_PORT_NOT_REGISTERED 0x21 +#define CT_EXPL_MULTIPLE_PORT_ATTR 0x22 +#define CT_EXPL_INVALID_PORT_BLOCK_LENGTH 0x23 #define NS_N_PORT_TYPE 0x01 #define NS_NL_PORT_TYPE 0x02 @@ -2116,33 +2127,40 @@ static const char * const port_state_str[] = { * HBA attribute types. 
*/ #define FDMI_HBA_ATTR_COUNT 9 -#define FDMI_HBA_NODE_NAME 1 -#define FDMI_HBA_MANUFACTURER 2 -#define FDMI_HBA_SERIAL_NUMBER 3 -#define FDMI_HBA_MODEL 4 -#define FDMI_HBA_MODEL_DESCRIPTION 5 -#define FDMI_HBA_HARDWARE_VERSION 6 -#define FDMI_HBA_DRIVER_VERSION 7 -#define FDMI_HBA_OPTION_ROM_VERSION 8 -#define FDMI_HBA_FIRMWARE_VERSION 9 +#define FDMIV2_HBA_ATTR_COUNT 17 +#define FDMI_HBA_NODE_NAME 0x1 +#define FDMI_HBA_MANUFACTURER 0x2 +#define FDMI_HBA_SERIAL_NUMBER 0x3 +#define FDMI_HBA_MODEL 0x4 +#define FDMI_HBA_MODEL_DESCRIPTION 0x5 +#define FDMI_HBA_HARDWARE_VERSION 0x6 +#define FDMI_HBA_DRIVER_VERSION 0x7 +#define FDMI_HBA_OPTION_ROM_VERSION 0x8 +#define FDMI_HBA_FIRMWARE_VERSION 0x9 #define FDMI_HBA_OS_NAME_AND_VERSION 0xa #define FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH 0xb +#define FDMI_HBA_NODE_SYMBOLIC_NAME 0xc +#define FDMI_HBA_VENDOR_ID 0xd +#define FDMI_HBA_NUM_PORTS 0xe +#define FDMI_HBA_FABRIC_NAME 0xf +#define FDMI_HBA_BOOT_BIOS_NAME 0x10 +#define FDMI_HBA_TYPE_VENDOR_IDENTIFIER 0xe0 struct ct_fdmi_hba_attr { uint16_t type; uint16_t len; union { uint8_t node_name[WWN_SIZE]; - uint8_t manufacturer[32]; - uint8_t serial_num[8]; + uint8_t manufacturer[64]; + uint8_t serial_num[32]; uint8_t model[16]; uint8_t model_desc[80]; - uint8_t hw_version[16]; + uint8_t hw_version[32]; uint8_t driver_version[32]; uint8_t orom_version[16]; - uint8_t fw_version[16]; + uint8_t fw_version[32]; uint8_t os_version[128]; - uint8_t max_ct_len[4]; + uint32_t max_ct_len; } a; }; @@ -2151,16 +2169,56 @@ struct ct_fdmi_hba_attributes { struct ct_fdmi_hba_attr entry[FDMI_HBA_ATTR_COUNT]; }; +struct ct_fdmiv2_hba_attr { + uint16_t type; + uint16_t len; + union { + uint8_t node_name[WWN_SIZE]; + uint8_t manufacturer[32]; + uint8_t serial_num[32]; + uint8_t model[16]; + uint8_t model_desc[80]; + uint8_t hw_version[16]; + uint8_t driver_version[32]; + uint8_t orom_version[16]; + uint8_t fw_version[32]; + uint8_t os_version[128]; + uint32_t max_ct_len; + uint8_t sym_name[256]; + uint32_t vendor_id; + uint32_t num_ports; + uint8_t fabric_name[WWN_SIZE]; + uint8_t bios_name[32]; + uint8_t vendor_indentifer[8]; + } a; +}; + +struct ct_fdmiv2_hba_attributes { + uint32_t count; + struct ct_fdmiv2_hba_attr entry[FDMIV2_HBA_ATTR_COUNT]; +}; + /* * Port attribute types. 
*/ #define FDMI_PORT_ATTR_COUNT 6 -#define FDMI_PORT_FC4_TYPES 1 -#define FDMI_PORT_SUPPORT_SPEED 2 -#define FDMI_PORT_CURRENT_SPEED 3 -#define FDMI_PORT_MAX_FRAME_SIZE 4 -#define FDMI_PORT_OS_DEVICE_NAME 5 -#define FDMI_PORT_HOST_NAME 6 +#define FDMIV2_PORT_ATTR_COUNT 16 +#define FDMI_PORT_FC4_TYPES 0x1 +#define FDMI_PORT_SUPPORT_SPEED 0x2 +#define FDMI_PORT_CURRENT_SPEED 0x3 +#define FDMI_PORT_MAX_FRAME_SIZE 0x4 +#define FDMI_PORT_OS_DEVICE_NAME 0x5 +#define FDMI_PORT_HOST_NAME 0x6 +#define FDMI_PORT_NODE_NAME 0x7 +#define FDMI_PORT_NAME 0x8 +#define FDMI_PORT_SYM_NAME 0x9 +#define FDMI_PORT_TYPE 0xa +#define FDMI_PORT_SUPP_COS 0xb +#define FDMI_PORT_FABRIC_NAME 0xc +#define FDMI_PORT_FC4_TYPE 0xd +#define FDMI_PORT_STATE 0x101 +#define FDMI_PORT_COUNT 0x102 +#define FDMI_PORT_ID 0x103 #define FDMI_PORT_SPEED_1GB 0x1 #define FDMI_PORT_SPEED_2GB 0x2 @@ -2171,7 +2229,11 @@ struct ct_fdmi_hba_attributes { #define FDMI_PORT_SPEED_32GB 0x40 #define FDMI_PORT_SPEED_UNKNOWN 0x8000 -struct ct_fdmi_port_attr { +#define FC_CLASS_2 0x04 +#define FC_CLASS_3 0x08 +#define FC_CLASS_2_3 0x0C + +struct ct_fdmiv2_port_attr { uint16_t type; uint16_t len; union { @@ -2181,12 +2243,40 @@ struct ct_fdmi_port_attr { uint32_t max_frame_size; uint8_t os_dev_name[32]; uint8_t host_name[32]; + uint8_t node_name[WWN_SIZE]; + uint8_t port_name[WWN_SIZE]; + uint8_t port_sym_name[128]; + uint32_t port_type; + uint32_t port_supported_cos; + uint8_t fabric_name[WWN_SIZE]; + uint8_t port_fc4_type[32]; + uint32_t port_state; + uint32_t num_ports; + uint32_t port_id; } a; }; /* * Port Attribute Block. */ +struct ct_fdmiv2_port_attributes { + uint32_t count; + struct ct_fdmiv2_port_attr entry[FDMIV2_PORT_ATTR_COUNT]; +}; + +struct ct_fdmi_port_attr { + uint16_t type; + uint16_t len; + union { + uint8_t fc4_types[32]; + uint32_t sup_speed; + uint32_t cur_speed; + uint32_t max_frame_size; + uint8_t os_dev_name[32]; + uint8_t host_name[32]; + } a; +}; + struct ct_fdmi_port_attributes { uint32_t count; struct ct_fdmi_port_attr entry[FDMI_PORT_ATTR_COUNT]; @@ -2284,6 +2374,13 @@ struct ct_sns_req { struct ct_fdmi_hba_attributes attrs; } rhba; + struct { + uint8_t hba_identifier[8]; + uint32_t entry_count; + uint8_t port_name[8]; + struct ct_fdmiv2_hba_attributes attrs; + } rhba2; + struct { uint8_t hba_identifier[8]; struct ct_fdmi_hba_attributes attrs; @@ -2294,6 +2391,11 @@ struct ct_sns_req { struct ct_fdmi_port_attributes attrs; } rpa; + struct { + uint8_t port_name[8]; + struct ct_fdmiv2_port_attributes attrs; + } rpa2; + struct { uint8_t port_name[8]; } dhba; @@ -2522,7 +2624,7 @@ struct isp_operations { int (*load_risc) (struct scsi_qla_host *, uint32_t *); char * (*pci_info_str) (struct scsi_qla_host *, char *); - char * (*fw_version_str) (struct scsi_qla_host *, char *); + char * (*fw_version_str)(struct scsi_qla_host *, char *, size_t); irq_handler_t intr_handler; void (*enable_intrs) (struct qla_hw_data *); diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 43ef0db9654e..0f851deec0e8 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -562,7 +562,7 @@ extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t); extern int qla2x00_fdmi_register(scsi_qla_host_t *); extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *); extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *); -extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *); +extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t); /* * Global 
Function Prototypes in qla_attr.c source file. @@ -614,7 +614,7 @@ extern void qlafx00_soft_reset(scsi_qla_host_t *); extern int qlafx00_chip_diag(scsi_qla_host_t *); extern void qlafx00_config_rings(struct scsi_qla_host *); extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *); -extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *); +extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *, size_t); extern irqreturn_t qlafx00_intr_handler(int, void *); extern void qlafx00_enable_intrs(struct qla_hw_data *); extern void qlafx00_disable_intrs(struct qla_hw_data *); diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index a0df3b1b3823..dccc4dcc39c8 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -6,6 +6,7 @@ */ #include "qla_def.h" #include "qla_target.h" +#include static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *); static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *); @@ -143,10 +144,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, if (ct_rsp->header.response != __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077, - "%s failed rejected request on port_id: " - "%02x%02x%02x.\n", routine, - vha->d_id.b.domain, vha->d_id.b.area, - vha->d_id.b.al_pa); + "%s failed rejected request on port_id: %02x%02x%02x Compeltion status 0x%x, response 0x%x\n", + routine, vha->d_id.b.domain, + vha->d_id.b.area, vha->d_id.b.al_pa, + comp_status, ct_rsp->header.response); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2078, (uint8_t *)&ct_rsp->header, sizeof(struct ct_rsp_hdr)); @@ -622,15 +623,16 @@ qla2x00_rnn_id(scsi_qla_host_t *vha) } void -qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn) +qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size) { struct qla_hw_data *ha = vha->hw; if (IS_QLAFX00(ha)) - sprintf(snn, "%s FW:v%s DVR:v%s", ha->model_number, + snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number, ha->mr.fw_version, qla2x00_version_str); else - sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number, + snprintf(snn, size, + "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number, ha->fw_major_version, ha->fw_minor_version, ha->fw_subminor_version, qla2x00_version_str); } @@ -670,7 +672,8 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha) memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE); /* Prepare the Symbolic Node Name */ - qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name); + qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name, + sizeof(ct_req->req.rsnn_nn.sym_node_name)); /* Calculate SNN length */ ct_req->req.rsnn_nn.name_len = @@ -1263,7 +1266,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; - uint8_t *entries; + void *entries; struct ct_fdmi_hba_attr *eiter; struct qla_hw_data *ha = vha->hw; @@ -1288,7 +1291,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) entries = ct_req->req.rhba.hba_identifier; /* Nodename. */ - eiter = (struct ct_fdmi_hba_attr *) (entries + size); + eiter = entries + size; eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME); eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE); memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE); @@ -1298,11 +1301,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) "NodeName = %8phN.\n", eiter->a.node_name); /* Manufacturer. 
*/ - eiter = (struct ct_fdmi_hba_attr *) (entries + size); + eiter = entries + size; eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER); alen = strlen(QLA2XXX_MANUFACTURER); - strncpy(eiter->a.manufacturer, QLA2XXX_MANUFACTURER, alen + 1); - alen += (alen & 3) ? (4 - (alen & 3)) : 4; + snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer), + "%s", "QLogic Corporation"); + alen += 4 - (alen & 3); eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; @@ -1310,12 +1314,19 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) "Manufacturer = %s.\n", eiter->a.manufacturer); /* Serial number. */ - eiter = (struct ct_fdmi_hba_attr *) (entries + size); + eiter = entries + size; eiter->type = __constant_cpu_to_be16(FDMI_HBA_SERIAL_NUMBER); - sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1; - sprintf(eiter->a.serial_num, "%c%05d", 'A' + sn / 100000, sn % 100000); + if (IS_FWI2_CAPABLE(ha)) + qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num, + sizeof(eiter->a.serial_num)); + else { + sn = ((ha->serial0 & 0x1f) << 16) | + (ha->serial2 << 8) | ha->serial1; + snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num), + "%c%05d", 'A' + sn / 100000, sn % 100000); + } alen = strlen(eiter->a.serial_num); - alen += (alen & 3) ? (4 - (alen & 3)) : 4; + alen += 4 - (alen & 3); eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; @@ -1323,11 +1334,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) "Serial no. = %s.\n", eiter->a.serial_num); /* Model name. */ - eiter = (struct ct_fdmi_hba_attr *) (entries + size); + eiter = entries + size; eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL); - strcpy(eiter->a.model, ha->model_number); + snprintf(eiter->a.model, sizeof(eiter->a.model), + "%s", ha->model_number); alen = strlen(eiter->a.model); - alen += (alen & 3) ? (4 - (alen & 3)) : 4; + alen += 4 - (alen & 3); eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; @@ -1335,11 +1347,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) "Model Name = %s.\n", eiter->a.model); /* Model description. */ - eiter = (struct ct_fdmi_hba_attr *) (entries + size); + eiter = entries + size; eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION); - strncpy(eiter->a.model_desc, ha->model_desc, 80); + snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc), + "%s", ha->model_desc); alen = strlen(eiter->a.model_desc); - alen += (alen & 3) ? (4 - (alen & 3)) : 4; + alen += 4 - (alen & 3); eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; @@ -1347,11 +1360,23 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) "Model Desc = %s.\n", eiter->a.model_desc); /* Hardware version. */ - eiter = (struct ct_fdmi_hba_attr *) (entries + size); + eiter = entries + size; eiter->type = __constant_cpu_to_be16(FDMI_HBA_HARDWARE_VERSION); - strcpy(eiter->a.hw_version, ha->adapter_id); + if (!IS_FWI2_CAPABLE(ha)) { + snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version), + "HW:%s", ha->adapter_id); + } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version, + sizeof(eiter->a.hw_version))) { + ; + } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version, + sizeof(eiter->a.hw_version))) { + ; + } else { + snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version), + "HW:%s", ha->adapter_id); + } alen = strlen(eiter->a.hw_version); - alen += (alen & 3) ? (4 - (alen & 3)) : 4; + alen += 4 - (alen & 3); eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; @@ -1359,11 +1384,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) "Hardware ver = %s.\n", eiter->a.hw_version); /* Driver version. 
*/ - eiter = (struct ct_fdmi_hba_attr *) (entries + size); + eiter = entries + size; eiter->type = __constant_cpu_to_be16(FDMI_HBA_DRIVER_VERSION); - strcpy(eiter->a.driver_version, qla2x00_version_str); + snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version), + "%s", qla2x00_version_str); alen = strlen(eiter->a.driver_version); - alen += (alen & 3) ? (4 - (alen & 3)) : 4; + alen += 4 - (alen & 3); eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; @@ -1371,11 +1397,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) "Driver ver = %s.\n", eiter->a.driver_version); /* Option ROM version. */ - eiter = (struct ct_fdmi_hba_attr *) (entries + size); + eiter = entries + size; eiter->type = __constant_cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION); - strcpy(eiter->a.orom_version, "0.00"); + snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version), + "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]); alen = strlen(eiter->a.orom_version); - alen += (alen & 3) ? (4 - (alen & 3)) : 4; + alen += 4 - (alen & 3); eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; @@ -1383,11 +1410,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) "Optrom vers = %s.\n", eiter->a.orom_version); /* Firmware version */ - eiter = (struct ct_fdmi_hba_attr *) (entries + size); + eiter = entries + size; eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION); - ha->isp_ops->fw_version_str(vha, eiter->a.fw_version); + ha->isp_ops->fw_version_str(vha, eiter->a.fw_version, + sizeof(eiter->a.fw_version)); alen = strlen(eiter->a.fw_version); - alen += (alen & 3) ? (4 - (alen & 3)) : 4; + alen += 4 - (alen & 3); eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; @@ -1419,6 +1447,11 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) ql_dbg(ql_dbg_disc, vha, 0x2034, "HBA already registered.\n"); rval = QLA_ALREADY_REGISTERED; + } else { + ql_dbg(ql_dbg_disc, vha, 0x20ad, + "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n", + ct_rsp->header.reason_code, + ct_rsp->header.explanation_code); } } else { ql_dbg(ql_dbg_disc, vha, 0x2035, @@ -1428,6 +1461,534 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) return rval; } +/** + * qla2x00_fdmi_rpa() - + * @ha: HA context + * + * Returns 0 on success. + */ +static int +qla2x00_fdmi_rpa(scsi_qla_host_t *vha) +{ + int rval, alen; + uint32_t size; + struct qla_hw_data *ha = vha->hw; + ms_iocb_entry_t *ms_pkt; + struct ct_sns_req *ct_req; + struct ct_sns_rsp *ct_rsp; + void *entries; + struct ct_fdmi_port_attr *eiter; + struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb; + struct new_utsname *p_sysid = NULL; + + /* Issue RPA */ + /* Prepare common MS IOCB */ + /* Request size adjusted after CT preparation */ + ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE); + + /* Prepare CT request */ + ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, + RPA_RSP_SIZE); + ct_rsp = &ha->ct_sns->p.rsp; + + /* Prepare FDMI command arguments -- attribute block, attributes. */ + memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE); + size = WWN_SIZE + 4; + + /* Attributes */ + ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT); + entries = ct_req->req.rpa.port_name; + + /* FC4 types. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES); + eiter->len = cpu_to_be16(4 + 32); + eiter->a.fc4_types[2] = 0x01; + size += 4 + 32; + + ql_dbg(ql_dbg_disc, vha, 0x2039, + "FC4_TYPES=%02x %02x.\n", + eiter->a.fc4_types[2], + eiter->a.fc4_types[1]); + + /* Supported speed. 
*/ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED); + eiter->len = cpu_to_be16(4 + 4); + if (IS_CNA_CAPABLE(ha)) + eiter->a.sup_speed = cpu_to_be32( + FDMI_PORT_SPEED_10GB); + else if (IS_QLA27XX(ha)) + eiter->a.sup_speed = cpu_to_be32( + FDMI_PORT_SPEED_32GB| + FDMI_PORT_SPEED_16GB| + FDMI_PORT_SPEED_8GB); + else if (IS_QLA2031(ha)) + eiter->a.sup_speed = cpu_to_be32( + FDMI_PORT_SPEED_16GB| + FDMI_PORT_SPEED_8GB| + FDMI_PORT_SPEED_4GB); + else if (IS_QLA25XX(ha)) + eiter->a.sup_speed = cpu_to_be32( + FDMI_PORT_SPEED_8GB| + FDMI_PORT_SPEED_4GB| + FDMI_PORT_SPEED_2GB| + FDMI_PORT_SPEED_1GB); + else if (IS_QLA24XX_TYPE(ha)) + eiter->a.sup_speed = cpu_to_be32( + FDMI_PORT_SPEED_4GB| + FDMI_PORT_SPEED_2GB| + FDMI_PORT_SPEED_1GB); + else if (IS_QLA23XX(ha)) + eiter->a.sup_speed = cpu_to_be32( + FDMI_PORT_SPEED_2GB| + FDMI_PORT_SPEED_1GB); + else + eiter->a.sup_speed = cpu_to_be32( + FDMI_PORT_SPEED_1GB); + size += 4 + 4; + + ql_dbg(ql_dbg_disc, vha, 0x203a, + "Supported_Speed=%x.\n", eiter->a.sup_speed); + + /* Current speed. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED); + eiter->len = cpu_to_be16(4 + 4); + switch (ha->link_data_rate) { + case PORT_SPEED_1GB: + eiter->a.cur_speed = + cpu_to_be32(FDMI_PORT_SPEED_1GB); + break; + case PORT_SPEED_2GB: + eiter->a.cur_speed = + cpu_to_be32(FDMI_PORT_SPEED_2GB); + break; + case PORT_SPEED_4GB: + eiter->a.cur_speed = + cpu_to_be32(FDMI_PORT_SPEED_4GB); + break; + case PORT_SPEED_8GB: + eiter->a.cur_speed = + cpu_to_be32(FDMI_PORT_SPEED_8GB); + break; + case PORT_SPEED_10GB: + eiter->a.cur_speed = + cpu_to_be32(FDMI_PORT_SPEED_10GB); + break; + case PORT_SPEED_16GB: + eiter->a.cur_speed = + cpu_to_be32(FDMI_PORT_SPEED_16GB); + break; + case PORT_SPEED_32GB: + eiter->a.cur_speed = + cpu_to_be32(FDMI_PORT_SPEED_32GB); + break; + default: + eiter->a.cur_speed = + cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN); + break; + } + size += 4 + 4; + + ql_dbg(ql_dbg_disc, vha, 0x203b, + "Current_Speed=%x.\n", eiter->a.cur_speed); + + /* Max frame size. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE); + eiter->len = cpu_to_be16(4 + 4); + eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ? + le16_to_cpu(icb24->frame_payload_size) : + le16_to_cpu(ha->init_cb->frame_payload_size); + eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size); + size += 4 + 4; + + ql_dbg(ql_dbg_disc, vha, 0x203c, + "Max_Frame_Size=%x.\n", eiter->a.max_frame_size); + + /* OS device name. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME); + snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name), + "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no); + alen = strlen(eiter->a.os_dev_name); + alen += 4 - (alen & 3); + eiter->len = cpu_to_be16(4 + alen); + size += 4 + alen; + + ql_dbg(ql_dbg_disc, vha, 0x204b, + "OS_Device_Name=%s.\n", eiter->a.os_dev_name); + + /* Hostname. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME); + p_sysid = utsname(); + if (p_sysid) { + snprintf(eiter->a.host_name, sizeof(eiter->a.host_name), + "%s", p_sysid->nodename); + } else { + snprintf(eiter->a.host_name, sizeof(eiter->a.host_name), + "%s", fc_host_system_hostname(vha->host)); + } + alen = strlen(eiter->a.host_name); + alen += 4 - (alen & 3); + eiter->len = cpu_to_be16(4 + alen); + size += 4 + alen; + + ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name); + + /* Update MS request size. 
*/ + qla2x00_update_ms_fdmi_iocb(vha, size + 16); + + ql_dbg(ql_dbg_disc, vha, 0x203e, + "RPA portname %016llx, size = %d.\n", + wwn_to_u64(ct_req->req.rpa.port_name), size); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079, + entries, size); + + /* Execute MS IOCB */ + rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, + sizeof(ms_iocb_entry_t)); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_disc, vha, 0x2040, + "RPA issue IOCB failed (%d).\n", rval); + } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") != + QLA_SUCCESS) { + rval = QLA_FUNCTION_FAILED; + if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && + ct_rsp->header.explanation_code == + CT_EXPL_ALREADY_REGISTERED) { + ql_dbg(ql_dbg_disc, vha, 0x20cd, + "RPA already registered.\n"); + rval = QLA_ALREADY_REGISTERED; + } + + } else { + ql_dbg(ql_dbg_disc, vha, 0x2041, + "RPA exiting normally.\n"); + } + + return rval; +} + +/** + * qla2x00_fdmiv2_rhba() - + * @ha: HA context + * + * Returns 0 on success. + */ +static int +qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha) +{ + int rval, alen; + uint32_t size, sn; + ms_iocb_entry_t *ms_pkt; + struct ct_sns_req *ct_req; + struct ct_sns_rsp *ct_rsp; + void *entries; + struct ct_fdmiv2_hba_attr *eiter; + struct qla_hw_data *ha = vha->hw; + struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb; + struct new_utsname *p_sysid = NULL; + + /* Issue RHBA */ + /* Prepare common MS IOCB */ + /* Request size adjusted after CT preparation */ + ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE); + + /* Prepare CT request */ + ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, + RHBA_RSP_SIZE); + ct_rsp = &ha->ct_sns->p.rsp; + + /* Prepare FDMI command arguments -- attribute block, attributes. */ + memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE); + ct_req->req.rhba2.entry_count = cpu_to_be32(1); + memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE); + size = 2 * WWN_SIZE + 4 + 4; + + /* Attributes */ + ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT); + entries = ct_req->req.rhba2.hba_identifier; + + /* Nodename. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME); + eiter->len = cpu_to_be16(4 + WWN_SIZE); + memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE); + size += 4 + WWN_SIZE; + + ql_dbg(ql_dbg_disc, vha, 0x207d, + "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name)); + + /* Manufacturer. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER); + snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer), + "%s", "QLogic Corporation"); + eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0'; + alen = strlen(eiter->a.manufacturer); + alen += 4 - (alen & 3); + eiter->len = cpu_to_be16(4 + alen); + size += 4 + alen; + + ql_dbg(ql_dbg_disc, vha, 0x20a5, + "Manufacturer = %s.\n", eiter->a.manufacturer); + + /* Serial number. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER); + if (IS_FWI2_CAPABLE(ha)) + qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num, + sizeof(eiter->a.serial_num)); + else { + sn = ((ha->serial0 & 0x1f) << 16) | + (ha->serial2 << 8) | ha->serial1; + snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num), + "%c%05d", 'A' + sn / 100000, sn % 100000); + } + alen = strlen(eiter->a.serial_num); + alen += 4 - (alen & 3); + eiter->len = cpu_to_be16(4 + alen); + size += 4 + alen; + + ql_dbg(ql_dbg_disc, vha, 0x20a6, + "Serial no. 
= %s.\n", eiter->a.serial_num); + + /* Model name. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_MODEL); + snprintf(eiter->a.model, sizeof(eiter->a.model), + "%s", ha->model_number); + alen = strlen(eiter->a.model); + alen += 4 - (alen & 3); + eiter->len = cpu_to_be16(4 + alen); + size += 4 + alen; + + ql_dbg(ql_dbg_disc, vha, 0x20a7, + "Model Name = %s.\n", eiter->a.model); + + /* Model description. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION); + snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc), + "%s", ha->model_desc); + alen = strlen(eiter->a.model_desc); + alen += 4 - (alen & 3); + eiter->len = cpu_to_be16(4 + alen); + size += 4 + alen; + + ql_dbg(ql_dbg_disc, vha, 0x20a8, + "Model Desc = %s.\n", eiter->a.model_desc); + + /* Hardware version. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION); + if (!IS_FWI2_CAPABLE(ha)) { + snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version), + "HW:%s", ha->adapter_id); + } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version, + sizeof(eiter->a.hw_version))) { + ; + } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version, + sizeof(eiter->a.hw_version))) { + ; + } else { + snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version), + "HW:%s", ha->adapter_id); + } + alen = strlen(eiter->a.hw_version); + alen += 4 - (alen & 3); + eiter->len = cpu_to_be16(4 + alen); + size += 4 + alen; + + ql_dbg(ql_dbg_disc, vha, 0x20a9, + "Hardware ver = %s.\n", eiter->a.hw_version); + + /* Driver version. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION); + snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version), + "%s", qla2x00_version_str); + alen = strlen(eiter->a.driver_version); + alen += 4 - (alen & 3); + eiter->len = cpu_to_be16(4 + alen); + size += 4 + alen; + + ql_dbg(ql_dbg_disc, vha, 0x20aa, + "Driver ver = %s.\n", eiter->a.driver_version); + + /* Option ROM version. 
*/ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION); + snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version), + "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]); + alen = strlen(eiter->a.orom_version); + alen += 4 - (alen & 3); + eiter->len = cpu_to_be16(4 + alen); + size += 4 + alen; + + ql_dbg(ql_dbg_disc, vha , 0x20ab, + "Optrom version = %d.%02d.\n", eiter->a.orom_version[1], + eiter->a.orom_version[0]); + + /* Firmware version */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION); + ha->isp_ops->fw_version_str(vha, eiter->a.fw_version, + sizeof(eiter->a.fw_version)); + alen = strlen(eiter->a.fw_version); + alen += 4 - (alen & 3); + eiter->len = cpu_to_be16(4 + alen); + size += 4 + alen; + + ql_dbg(ql_dbg_disc, vha, 0x20ac, + "Firmware vers = %s.\n", eiter->a.fw_version); + + /* OS Name and Version */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION); + p_sysid = utsname(); + if (p_sysid) { + snprintf(eiter->a.os_version, sizeof(eiter->a.os_version), + "%s %s %s", + p_sysid->sysname, p_sysid->release, p_sysid->version); + } else { + snprintf(eiter->a.os_version, sizeof(eiter->a.os_version), + "%s %s", "Linux", fc_host_system_hostname(vha->host)); + } + alen = strlen(eiter->a.os_version); + alen += 4 - (alen & 3); + eiter->len = cpu_to_be16(4 + alen); + size += 4 + alen; + + ql_dbg(ql_dbg_disc, vha, 0x20ae, + "OS Name and Version = %s.\n", eiter->a.os_version); + + /* MAX CT Payload Length */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH); + eiter->a.max_ct_len = IS_FWI2_CAPABLE(ha) ? + le16_to_cpu(icb24->frame_payload_size) : + le16_to_cpu(ha->init_cb->frame_payload_size); + eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len); + eiter->len = cpu_to_be16(4 + 4); + size += 4 + 4; + + ql_dbg(ql_dbg_disc, vha, 0x20af, + "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len); + + /* Node Sybolic Name */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME); + qla2x00_get_sym_node_name(vha, eiter->a.sym_name, + sizeof(eiter->a.sym_name)); + alen = strlen(eiter->a.sym_name); + alen += 4 - (alen & 3); + eiter->len = cpu_to_be16(4 + alen); + size += 4 + alen; + + ql_dbg(ql_dbg_disc, vha, 0x20b0, + "Symbolic Name = %s.\n", eiter->a.sym_name); + + /* Vendor Id */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID); + eiter->a.vendor_id = cpu_to_be32(0x1077); + eiter->len = cpu_to_be16(4 + 4); + size += 4 + 4; + + ql_dbg(ql_dbg_disc, vha, 0x20b1, + "Vendor Id = %x.\n", eiter->a.vendor_id); + + /* Num Ports */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS); + eiter->a.num_ports = cpu_to_be32(1); + eiter->len = cpu_to_be16(4 + 4); + size += 4 + 4; + + ql_dbg(ql_dbg_disc, vha, 0x20b2, + "Port Num = %x.\n", eiter->a.num_ports); + + /* Fabric Name */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME); + memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE); + eiter->len = cpu_to_be16(4 + WWN_SIZE); + size += 4 + WWN_SIZE; + + ql_dbg(ql_dbg_disc, vha, 0x20b3, + "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name)); + + /* BIOS Version */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME); + snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name), + "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]); + alen = strlen(eiter->a.bios_name); + alen += 4 - (alen & 3); + eiter->len = cpu_to_be16(4 + alen); 
+ size += 4 + alen; + + ql_dbg(ql_dbg_disc, vha, 0x20b4, + "BIOS Name = %s\n", eiter->a.bios_name); + + /* Vendor Identifier */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER); + snprintf(eiter->a.vendor_indentifer, sizeof(eiter->a.vendor_indentifer), + "%s", "QLGC"); + alen = strlen(eiter->a.vendor_indentifer); + alen += 4 - (alen & 3); + eiter->len = cpu_to_be16(4 + alen); + size += 4 + alen; + + ql_dbg(ql_dbg_disc, vha, 0x20b1, + "Vendor Identifier = %s.\n", eiter->a.vendor_indentifer); + + /* Update MS request size. */ + qla2x00_update_ms_fdmi_iocb(vha, size + 16); + + ql_dbg(ql_dbg_disc, vha, 0x20b5, + "RHBA identifier = %016llx.\n", + wwn_to_u64(ct_req->req.rhba2.hba_identifier)); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6, + entries, size); + + /* Execute MS IOCB */ + rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, + sizeof(ms_iocb_entry_t)); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_disc, vha, 0x20b7, + "RHBA issue IOCB failed (%d).\n", rval); + } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") != + QLA_SUCCESS) { + rval = QLA_FUNCTION_FAILED; + + if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && + ct_rsp->header.explanation_code == + CT_EXPL_ALREADY_REGISTERED) { + ql_dbg(ql_dbg_disc, vha, 0x20b8, + "HBA already registered.\n"); + rval = QLA_ALREADY_REGISTERED; + } else { + ql_dbg(ql_dbg_disc, vha, 0x2016, + "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n", + ct_rsp->header.reason_code, + ct_rsp->header.explanation_code); + } + } else { + ql_dbg(ql_dbg_disc, vha, 0x20b9, + "RHBA FDMI V2 exiting normally.\n"); + } + + return rval; +} + /** * qla2x00_fdmi_dhba() - * @ha: HA context @@ -1477,23 +2038,24 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha) } /** - * qla2x00_fdmi_rpa() - + * qla2x00_fdmiv2_rpa() - * @ha: HA context * * Returns 0 on success. */ static int -qla2x00_fdmi_rpa(scsi_qla_host_t *vha) +qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha) { int rval, alen; - uint32_t size, max_frame_size; + uint32_t size; struct qla_hw_data *ha = vha->hw; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; - uint8_t *entries; - struct ct_fdmi_port_attr *eiter; + void *entries; + struct ct_fdmiv2_port_attr *eiter; struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb; + struct new_utsname *p_sysid = NULL; /* Issue RPA */ /* Prepare common MS IOCB */ @@ -1505,147 +2067,258 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) ct_rsp = &ha->ct_sns->p.rsp; /* Prepare FDMI command arguments -- attribute block, attributes. */ - memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE); + memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE); size = WWN_SIZE + 4; /* Attributes */ - ct_req->req.rpa.attrs.count = - __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT - 1); - entries = ct_req->req.rpa.port_name; + ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT); + entries = ct_req->req.rpa2.port_name; /* FC4 types. */ - eiter = (struct ct_fdmi_port_attr *) (entries + size); - eiter->type = __constant_cpu_to_be16(FDMI_PORT_FC4_TYPES); - eiter->len = __constant_cpu_to_be16(4 + 32); + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES); + eiter->len = cpu_to_be16(4 + 32); eiter->a.fc4_types[2] = 0x01; size += 4 + 32; - ql_dbg(ql_dbg_disc, vha, 0x2039, + ql_dbg(ql_dbg_disc, vha, 0x20ba, "FC4_TYPES=%02x %02x.\n", eiter->a.fc4_types[2], eiter->a.fc4_types[1]); /* Supported speed. 
*/ - eiter = (struct ct_fdmi_port_attr *) (entries + size); - eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED); - eiter->len = __constant_cpu_to_be16(4 + 4); + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED); + eiter->len = cpu_to_be16(4 + 4); if (IS_CNA_CAPABLE(ha)) - eiter->a.sup_speed = __constant_cpu_to_be32( + eiter->a.sup_speed = cpu_to_be32( FDMI_PORT_SPEED_10GB); else if (IS_QLA27XX(ha)) - eiter->a.sup_speed = __constant_cpu_to_be32( - FDMI_PORT_SPEED_32GB|FDMI_PORT_SPEED_16GB| + eiter->a.sup_speed = cpu_to_be32( + FDMI_PORT_SPEED_32GB| + FDMI_PORT_SPEED_16GB| FDMI_PORT_SPEED_8GB); + else if (IS_QLA2031(ha)) + eiter->a.sup_speed = cpu_to_be32( + FDMI_PORT_SPEED_16GB| + FDMI_PORT_SPEED_8GB| + FDMI_PORT_SPEED_4GB); else if (IS_QLA25XX(ha)) - eiter->a.sup_speed = __constant_cpu_to_be32( - FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB| - FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB); + eiter->a.sup_speed = cpu_to_be32( + FDMI_PORT_SPEED_8GB| + FDMI_PORT_SPEED_4GB| + FDMI_PORT_SPEED_2GB| + FDMI_PORT_SPEED_1GB); else if (IS_QLA24XX_TYPE(ha)) - eiter->a.sup_speed = __constant_cpu_to_be32( - FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB| - FDMI_PORT_SPEED_4GB); + eiter->a.sup_speed = cpu_to_be32( + FDMI_PORT_SPEED_4GB| + FDMI_PORT_SPEED_2GB| + FDMI_PORT_SPEED_1GB); else if (IS_QLA23XX(ha)) - eiter->a.sup_speed =__constant_cpu_to_be32( - FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB); + eiter->a.sup_speed = cpu_to_be32( + FDMI_PORT_SPEED_2GB| + FDMI_PORT_SPEED_1GB); else - eiter->a.sup_speed = __constant_cpu_to_be32( + eiter->a.sup_speed = cpu_to_be32( FDMI_PORT_SPEED_1GB); size += 4 + 4; - ql_dbg(ql_dbg_disc, vha, 0x203a, - "Supported_Speed=%x.\n", eiter->a.sup_speed); + ql_dbg(ql_dbg_disc, vha, 0x20bb, + "Supported Port Speed = %x.\n", eiter->a.sup_speed); /* Current speed. 
*/ - eiter = (struct ct_fdmi_port_attr *) (entries + size); - eiter->type = __constant_cpu_to_be16(FDMI_PORT_CURRENT_SPEED); - eiter->len = __constant_cpu_to_be16(4 + 4); + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED); + eiter->len = cpu_to_be16(4 + 4); switch (ha->link_data_rate) { case PORT_SPEED_1GB: - eiter->a.cur_speed = - __constant_cpu_to_be32(FDMI_PORT_SPEED_1GB); + eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB); break; case PORT_SPEED_2GB: - eiter->a.cur_speed = - __constant_cpu_to_be32(FDMI_PORT_SPEED_2GB); + eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB); break; case PORT_SPEED_4GB: - eiter->a.cur_speed = - __constant_cpu_to_be32(FDMI_PORT_SPEED_4GB); + eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB); break; case PORT_SPEED_8GB: - eiter->a.cur_speed = - __constant_cpu_to_be32(FDMI_PORT_SPEED_8GB); + eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB); break; case PORT_SPEED_10GB: - eiter->a.cur_speed = - __constant_cpu_to_be32(FDMI_PORT_SPEED_10GB); + eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB); break; case PORT_SPEED_16GB: - eiter->a.cur_speed = - __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB); + eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB); break; case PORT_SPEED_32GB: - eiter->a.cur_speed = - __constant_cpu_to_be32(FDMI_PORT_SPEED_32GB); + eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB); break; default: - eiter->a.cur_speed = - __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN); + eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN); break; } size += 4 + 4; - ql_dbg(ql_dbg_disc, vha, 0x203b, - "Current_Speed=%x.\n", eiter->a.cur_speed); + ql_dbg(ql_dbg_disc, vha, 0x20bc, + "Current_Speed = %x.\n", eiter->a.cur_speed); /* Max frame size. */ - eiter = (struct ct_fdmi_port_attr *) (entries + size); - eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE); - eiter->len = __constant_cpu_to_be16(4 + 4); - max_frame_size = IS_FWI2_CAPABLE(ha) ? + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE); + eiter->len = cpu_to_be16(4 + 4); + eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ? le16_to_cpu(icb24->frame_payload_size): le16_to_cpu(ha->init_cb->frame_payload_size); - eiter->a.max_frame_size = cpu_to_be32(max_frame_size); + eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size); size += 4 + 4; - ql_dbg(ql_dbg_disc, vha, 0x203c, - "Max_Frame_Size=%x.\n", eiter->a.max_frame_size); + ql_dbg(ql_dbg_disc, vha, 0x20bc, + "Max_Frame_Size = %x.\n", eiter->a.max_frame_size); /* OS device name. */ - eiter = (struct ct_fdmi_port_attr *) (entries + size); - eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME); + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME); alen = strlen(QLA2XXX_DRIVER_NAME); - strncpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME, alen + 1); - alen += (alen & 3) ? (4 - (alen & 3)) : 4; + snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name), + "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no); + alen += 4 - (alen & 3); eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; - ql_dbg(ql_dbg_disc, vha, 0x204b, - "OS_Device_Name=%s.\n", eiter->a.os_dev_name); + ql_dbg(ql_dbg_disc, vha, 0x20be, + "OS_Device_Name = %s.\n", eiter->a.os_dev_name); /* Hostname. 
*/ - if (strlen(fc_host_system_hostname(vha->host))) { - ct_req->req.rpa.attrs.count = - __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT); - eiter = (struct ct_fdmi_port_attr *) (entries + size); - eiter->type = __constant_cpu_to_be16(FDMI_PORT_HOST_NAME); + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME); + p_sysid = utsname(); + if (p_sysid) { + snprintf(eiter->a.host_name, sizeof(eiter->a.host_name), + "%s", p_sysid->nodename); + } else { snprintf(eiter->a.host_name, sizeof(eiter->a.host_name), "%s", fc_host_system_hostname(vha->host)); - alen = strlen(eiter->a.host_name); - alen += (alen & 3) ? (4 - (alen & 3)) : 4; - eiter->len = cpu_to_be16(4 + alen); - size += 4 + alen; - - ql_dbg(ql_dbg_disc, vha, 0x203d, - "HostName=%s.\n", eiter->a.host_name); } + alen = strlen(eiter->a.host_name); + alen += 4 - (alen & 3); + eiter->len = cpu_to_be16(4 + alen); + size += 4 + alen; + + ql_dbg(ql_dbg_disc, vha, 0x203d, + "HostName=%s.\n", eiter->a.host_name); + + /* Node Name */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME); + memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE); + eiter->len = cpu_to_be16(4 + WWN_SIZE); + size += 4 + WWN_SIZE; + + ql_dbg(ql_dbg_disc, vha, 0x20c0, + "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name)); + + /* Port Name */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_NAME); + memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE); + eiter->len = cpu_to_be16(4 + WWN_SIZE); + size += 4 + WWN_SIZE; + + ql_dbg(ql_dbg_disc, vha, 0x20c1, + "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name)); + + /* Port Symbolic Name */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME); + qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name, + sizeof(eiter->a.port_sym_name)); + alen = strlen(eiter->a.port_sym_name); + alen += 4 - (alen & 3); + eiter->len = cpu_to_be16(4 + alen); + size += 4 + alen; + + ql_dbg(ql_dbg_disc, vha, 0x20c2, + "port symbolic name = %s\n", eiter->a.port_sym_name); + + /* Port Type */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_TYPE); + eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE); + eiter->len = cpu_to_be16(4 + 4); + size += 4 + 4; + + ql_dbg(ql_dbg_disc, vha, 0x20c3, + "Port Type = %x.\n", eiter->a.port_type); + + /* Class of Service */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS); + eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3); + eiter->len = cpu_to_be16(4 + 4); + size += 4 + 4; + + ql_dbg(ql_dbg_disc, vha, 0x20c4, + "Supported COS = %08x\n", eiter->a.port_supported_cos); + + /* Port Fabric Name */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME); + memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE); + eiter->len = cpu_to_be16(4 + WWN_SIZE); + size += 4 + WWN_SIZE; + + ql_dbg(ql_dbg_disc, vha, 0x20c5, + "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name)); + + /* FC4_type */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE); + eiter->a.port_fc4_type[0] = 0; + eiter->a.port_fc4_type[1] = 0; + eiter->a.port_fc4_type[2] = 1; + eiter->a.port_fc4_type[3] = 0; + eiter->len = cpu_to_be16(4 + 32); + size += 4 + 32; + + ql_dbg(ql_dbg_disc, vha, 0x20c6, + "Port Active FC4 Type = %02x %02x.\n", + eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]); + + /* Port State */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_STATE); + eiter->a.port_state = cpu_to_be32(1); + eiter->len = cpu_to_be16(4 + 4); + size += 4 + 4; + + 
ql_dbg(ql_dbg_disc, vha, 0x20c7, + "Port State = %x.\n", eiter->a.port_state); + + /* Number of Ports */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_COUNT); + eiter->a.num_ports = cpu_to_be32(1); + eiter->len = cpu_to_be16(4 + 4); + size += 4 + 4; + + ql_dbg(ql_dbg_disc, vha, 0x20c8, + "Number of ports = %x.\n", eiter->a.num_ports); + + /* Port Id */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_ID); + eiter->a.port_id = cpu_to_be32(vha->d_id.b24); + eiter->len = cpu_to_be16(4 + 4); + size += 4 + 4; + + ql_dbg(ql_dbg_disc, vha, 0x20c8, + "Port Id = %x.\n", eiter->a.port_id); /* Update MS request size. */ qla2x00_update_ms_fdmi_iocb(vha, size + 16); ql_dbg(ql_dbg_disc, vha, 0x203e, "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size); - ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079, + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca, entries, size); /* Execute MS IOCB */ @@ -1653,14 +2326,26 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - ql_dbg(ql_dbg_disc, vha, 0x2040, - "RPA issue IOCB failed (%d).\n", rval); + ql_dbg(ql_dbg_disc, vha, 0x20cb, + "RPA FDMI v2 issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; + if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && + ct_rsp->header.explanation_code == + CT_EXPL_ALREADY_REGISTERED) { + ql_dbg(ql_dbg_disc, vha, 0x20ce, + "RPA FDMI v2 already registered\n"); + rval = QLA_ALREADY_REGISTERED; + } else { + ql_dbg(ql_dbg_disc, vha, 0x2020, + "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n", + ct_rsp->header.reason_code, + ct_rsp->header.explanation_code); + } } else { - ql_dbg(ql_dbg_disc, vha, 0x2041, - "RPA exiting nornally.\n"); + ql_dbg(ql_dbg_disc, vha, 0x20cc, + "RPA FDMI V2 exiting normally.\n"); } return rval; @@ -1675,8 +2360,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) int qla2x00_fdmi_register(scsi_qla_host_t *vha) { - int rval; - struct qla_hw_data *ha = vha->hw; + int rval = QLA_FUNCTION_FAILED; + struct qla_hw_data *ha = vha->hw; if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLAFX00(ha)) @@ -1686,6 +2371,26 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha) if (rval) return rval; + rval = qla2x00_fdmiv2_rhba(vha); + if (rval) { + if (rval != QLA_ALREADY_REGISTERED) + goto try_fdmi; + + rval = qla2x00_fdmi_dhba(vha); + if (rval) + goto try_fdmi; + + rval = qla2x00_fdmiv2_rhba(vha); + if (rval) + goto try_fdmi; + } + rval = qla2x00_fdmiv2_rpa(vha); + if (rval) + goto try_fdmi; + + goto out; + +try_fdmi: rval = qla2x00_fdmi_rhba(vha); if (rval) { if (rval != QLA_ALREADY_REGISTERED) @@ -1700,7 +2405,7 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha) return rval; } rval = qla2x00_fdmi_rpa(vha); - +out: return rval; } diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index c54a53de351b..80867599527d 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -695,11 +695,11 @@ qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str) } char * -qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str) +qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) { struct qla_hw_data *ha = vha->hw; - sprintf(str, "%s", ha->mr.fw_version); + snprintf(str, size, "%s", ha->mr.fw_version); return str; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index f93785957b72..1b9864cdd83c 100644 --- a/drivers/scsi/qla2xxx/qla_os.c 
+++ b/drivers/scsi/qla2xxx/qla_os.c @@ -548,14 +548,13 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str) } static char * -qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str) +qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) { char un_str[10]; struct qla_hw_data *ha = vha->hw; - sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, - ha->fw_minor_version, - ha->fw_subminor_version); + snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version, + ha->fw_minor_version, ha->fw_subminor_version); if (ha->fw_attributes & BIT_9) { strcat(str, "FLX"); @@ -587,11 +586,11 @@ qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str) } static char * -qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str) +qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) { struct qla_hw_data *ha = vha->hw; - sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version, + snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version, ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes); return str; } @@ -2925,7 +2924,7 @@ skip_dpc: pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no, - ha->isp_ops->fw_version_str(base_vha, fw_str)); + ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str))); qlt_add_target(ha, base_vha); -- cgit From 7c9c4766896335d2f20928ccc5d8ad2d1e621b9a Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Thu, 25 Sep 2014 05:16:47 -0400 Subject: qla2xxx: Add FA-WWN functionality. Add support for the FA-WWN (fabric assigned wwn) fabric switch feature. (Fabric Assigned World Wide Port Name) Signed-off-by: Joe Carnuccio Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_dbg.c | 7 ++----- drivers/scsi/qla2xxx/qla_fw.h | 2 +- drivers/scsi/qla2xxx/qla_mbx.c | 36 +++++++++++++++++++++++++++++++++++- 3 files changed, 38 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 28442d082ba1..b33eec01d6dc 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -15,11 +15,8 @@ * | | | 0x0144,0x0146 | * | | | 0x015b-0x0160 | * | | | 0x016e-0x0170 | - * | Mailbox commands | 0x118d | 0x1018-0x1019 | - * | | | 0x10ca | - * | | | 0x1115-0x1116 | - * | | | 0x111a-0x111b | - * | | | 0x1155-0x1158 | + * | Mailbox commands | 0x118d | 0x1115-0x1116 | + * | | | 0x111a-0x111b | * | Device Discovery | 0x2016 | 0x2020-0x2022, | * | | | 0x2011-0x2012, | * | | | 0x2099-0x20a4 | diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index c7d1c4569fa1..e8669aa7e002 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -317,7 +317,7 @@ struct init_cb_24xx { * BIT 3 = Reserved * BIT 4 = Enable Target Mode * BIT 5 = Disable Initiator Mode - * BIT 6 = Reserved + * BIT 6 = Acquire FA-WWN * BIT 7 = Reserved * * BIT 8 = Reserved diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index ca7f4f012511..2de901bf87ab 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -1085,6 +1085,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; if (IS_CNA_CAPABLE(vha->hw)) mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; + if (IS_FWI2_CAPABLE(vha->hw)) + mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, 
mcp); @@ -1118,6 +1120,22 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8; vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff; } + /* If FA-WWN supported */ + if (mcp->mb[7] & BIT_14) { + vha->port_name[0] = MSB(mcp->mb[16]); + vha->port_name[1] = LSB(mcp->mb[16]); + vha->port_name[2] = MSB(mcp->mb[17]); + vha->port_name[3] = LSB(mcp->mb[17]); + vha->port_name[4] = MSB(mcp->mb[18]); + vha->port_name[5] = LSB(mcp->mb[18]); + vha->port_name[6] = MSB(mcp->mb[19]); + vha->port_name[7] = LSB(mcp->mb[19]); + fc_host_port_name(vha->host) = + wwn_to_u64(vha->port_name); + ql_dbg(ql_dbg_mbx, vha, 0x10ca, + "FA-WWN acquired %016llx\n", + wwn_to_u64(vha->port_name)); + } } return rval; @@ -3328,8 +3346,24 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); + /* FA-WWN is only for physical port */ + if (!vp_idx) { + void *wwpn = ha->init_cb->port_name; + + if (!MSB(stat)) { + if (rptid_entry->vp_idx_map[1] & BIT_6) + wwpn = rptid_entry->reserved_4 + 8; + } + memcpy(vha->port_name, wwpn, WWN_SIZE); + fc_host_port_name(vha->host) = + wwn_to_u64(vha->port_name); + ql_dbg(ql_dbg_mbx, vha, 0x1018, + "FA-WWN portname %016llx (%x)\n", + fc_host_port_name(vha->host), MSB(stat)); + } + vp = vha; - if (vp_idx == 0 && (MSB(stat) != 1)) + if (vp_idx == 0) goto reg_needed; if (MSB(stat) != 0 && MSB(stat) != 2) { -- cgit From b5a340dd858b5bdd2813756e14991dc64c0b16d9 Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Thu, 25 Sep 2014 05:16:48 -0400 Subject: qla2xxx: Add diagnostic port functionality. Add support for the D-port (diagnostic port) fabric switch feature. (Fabric Switch initiates loopback style port testing) Signed-off-by: Joe Carnuccio Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_attr.c | 6 +++--- drivers/scsi/qla2xxx/qla_dbg.c | 2 +- drivers/scsi/qla2xxx/qla_def.h | 2 ++ drivers/scsi/qla2xxx/qla_fw.h | 2 +- drivers/scsi/qla2xxx/qla_init.c | 6 +++--- drivers/scsi/qla2xxx/qla_isr.c | 8 ++++++++ drivers/scsi/qla2xxx/qla_mbx.c | 3 ++- 7 files changed, 20 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 868f4e5bd863..677239f41ea5 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1440,7 +1440,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int rval = QLA_FUNCTION_FAILED; - uint16_t state[5]; + uint16_t state[6]; uint32_t pstate; if (IS_QLAFX00(vha->hw)) { @@ -1456,8 +1456,8 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, if (rval != QLA_SUCCESS) memset(state, -1, sizeof(state)); - return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0], - state[1], state[2], state[3], state[4]); + return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + state[0], state[1], state[2], state[3], state[4], state[5]); } static ssize_t diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index b33eec01d6dc..2d5610be2d70 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -28,7 +28,7 @@ * | | | 0x303a | * | DPC Thread | 0x4023 | 0x4002,0x4013 | * | Async Events | 0x5087 | 0x502b-0x502f | - * | | | 0x5047,0x5052 | + * | | | 0x5047 | * | | | 0x5084,0x5075 | * | | | 0x503d,0x5044 | * | | | 0x507b,0x505f | diff --git 
a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index e5baead2369a..c161d60759e9 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -803,6 +803,7 @@ struct mbx_cmd_32 { #define MBA_FW_RESTART_CMPLT 0x8060 /* Firmware restart complete */ #define MBA_INIT_REQUIRED 0x8061 /* Initialization required */ #define MBA_SHUTDOWN_REQUESTED 0x8062 /* Shutdown Requested */ +#define MBA_DPORT_DIAGNOSTICS 0x8080 /* D-port Diagnostics */ #define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */ #define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change Notification */ @@ -948,6 +949,7 @@ struct mbx_cmd_32 { #define MBC_WRITE_SFP 0x30 /* Write SFP Data. */ #define MBC_READ_SFP 0x31 /* Read SFP Data. */ #define MBC_SET_TIMEOUT_PARAMS 0x32 /* Set FW timeouts. */ +#define MBC_DPORT_DIAGNOSTICS 0x47 /* D-Port Diagnostics */ #define MBC_MID_INITIALIZE_FIRMWARE 0x48 /* MID Initialize firmware. */ #define MBC_MID_GET_VP_DATABASE 0x49 /* MID Get VP Database. */ #define MBC_MID_GET_VP_ENTRY 0x4a /* MID Get VP Entry. */ diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index e8669aa7e002..7f2e1c71cc31 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -318,7 +318,7 @@ struct init_cb_24xx { * BIT 4 = Enable Target Mode * BIT 5 = Disable Initiator Mode * BIT 6 = Acquire FA-WWN - * BIT 7 = Reserved + * BIT 7 = Enable D-port Diagnostics * * BIT 8 = Reserved * BIT 9 = Non Participating LIP diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 748bd9093680..d5b10ecde4a0 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2224,7 +2224,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) unsigned long wtime, mtime, cs84xx_time; uint16_t min_wait; /* Minimum wait time if loop is down */ uint16_t wait_time; /* Wait time if loop is coming ready */ - uint16_t state[5]; + uint16_t state[6]; struct qla_hw_data *ha = vha->hw; if (IS_QLAFX00(vha->hw)) @@ -2329,8 +2329,8 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) } while (1); ql_dbg(ql_dbg_taskm, vha, 0x803a, - "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0], - state[1], state[2], state[3], state[4], jiffies); + "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0], + state[1], state[2], state[3], state[4], state[5], jiffies); if (rval && !(vha->device_flags & DFLG_NO_CABLE)) { ql_log(ql_log_warn, vha, 0x803b, diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 223c1a89af5c..550ffdf0bf17 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1086,6 +1086,14 @@ skip_rio: qla83xx_handle_8200_aen(vha, mb); break; + case MBA_DPORT_DIAGNOSTICS: + ql_dbg(ql_dbg_async, vha, 0x5052, + "D-Port Diagnostics: %04x %04x=%s\n", mb[0], mb[1], + mb[1] == 0 ? "start" : + mb[1] == 1 ? "done (ok)" : + mb[1] == 2 ? 
"done (error)" : "other"); + break; + default: ql_dbg(ql_dbg_async, vha, 0x5057, "Unknown AEN:%04x %04x %04x %04x\n", diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 2de901bf87ab..72971daa2552 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -1564,7 +1564,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) mcp->mb[0] = MBC_GET_FIRMWARE_STATE; mcp->out_mb = MBX_0; if (IS_FWI2_CAPABLE(vha->hw)) - mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; else mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; @@ -1578,6 +1578,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) states[2] = mcp->mb[3]; states[3] = mcp->mb[4]; states[4] = mcp->mb[5]; + states[5] = mcp->mb[6]; /* DPORT status */ } if (rval != QLA_SUCCESS) { -- cgit From 00ef7d885b716389f1ffd0a3158b673783b95506 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Thu, 25 Sep 2014 05:16:49 -0400 Subject: qla2xxx: Fix driver version string message. Signed-off-by: Himanshu Madhani Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_sup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 4788ecd0c42a..b656a05613e8 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -3092,7 +3092,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) ha->fw_revision[2] = dcode[2]; ha->fw_revision[3] = dcode[3]; ql_dbg(ql_dbg_init, vha, 0x0060, - "Firmware revision %d.%d.%d.%d.\n", + "Firmware revision %d.%d.%d (%x).\n", ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], ha->fw_revision[3]); } -- cgit From de187df86721da0a4d38f94557797a7138895e0a Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Thu, 25 Sep 2014 05:16:50 -0400 Subject: qla2xxx: Allow user to change ql2xfdmienable value. Signed-off-by: Himanshu Madhani Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_os.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 1b9864cdd83c..0f9dcf61c13a 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -105,7 +105,7 @@ MODULE_PARM_DESC(ql2xshiftctondsd, "based on total number of SG elements."); int ql2xfdmienable=1; -module_param(ql2xfdmienable, int, S_IRUGO); +module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xfdmienable, "Enables FDMI registrations. " "0 - no FDMI. Default is 1 - perform FDMI."); -- cgit From d2749ffadb4c76c04ad82645cc86a2fa39ceff3e Mon Sep 17 00:00:00 2001 From: Arun Easi Date: Thu, 25 Sep 2014 05:16:51 -0400 Subject: qla2xxx: Unload of qla2xxx driver crashes the machine. 
Signed-off-by: Arun Easi Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_os.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 0f9dcf61c13a..4880f0b2daa3 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3193,10 +3193,10 @@ qla2x00_remove_one(struct pci_dev *pdev) qla2x00_free_device(base_vha); - scsi_host_put(base_vha->host); - qla2x00_clear_drv_active(ha); + scsi_host_put(base_vha->host); + qla2x00_unmap_iobases(ha); pci_release_selected_regions(ha->pdev, ha->bars); -- cgit From 9d35894d338abc351cad8b0c0d5fb3e992f5cea9 Mon Sep 17 00:00:00 2001 From: Sawan Chandak Date: Thu, 25 Sep 2014 05:16:52 -0400 Subject: qla2xxx: Add fix in driver unload for pending activity. Signed-off-by: Sawan Chandak Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_os.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 4880f0b2daa3..72b94f9ca637 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -860,8 +860,10 @@ qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; - while ((!(vha->flags.online) || ha->dpc_active || - ha->flags.mbox_busy)) + while (((qla2x00_reset_active(vha)) || ha->dpc_active || + ha->flags.mbox_busy) || + test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) || + test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) msleep(1000); } -- cgit From 93f2bd67b34b4f6b35b2300d668d92e3fd01163a Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Thu, 25 Sep 2014 05:16:53 -0400 Subject: qla2xxx: Declaration error cause stack corruption. Declaration error of mb array in qla2x00_iidma_fcport cause data to be written beyond the array. This ends up causing stack corruption. Signed-off-by: Quinn Tran Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index d5b10ecde4a0..ab22ccf4c7d3 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3192,7 +3192,7 @@ static void qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) { int rval; - uint16_t mb[4]; + uint16_t mb[MAILBOX_REGISTER_COUNT]; struct qla_hw_data *ha = vha->hw; if (!IS_IIDMA_CAPABLE(ha)) -- cgit From 25232cc9b8eca0406f51e61e944e854ed021a9d7 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Thu, 25 Sep 2014 05:16:54 -0400 Subject: qla2xxx: Enable diagnostic port using NVRAM parameters. 
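Earlier in this series the init_cb_24xx firmware options 1 comment gained "BIT 6 = Acquire FA-WWN" and "BIT 7 = Enable D-port Diagnostics". This patch adds an IS_DPORT_CAPABLE() check (ISP83xx and ISP27xx) and, on those boards, sets BIT_7 in firmware_options_1 from qla2x00_init_rings(), so the firmware actually enables the D-port feature that the earlier patches wired up.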
Signed-off-by: Himanshu Madhani Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_def.h | 1 + drivers/scsi/qla2xxx/qla_init.c | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index c161d60759e9..c29f4653db18 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3136,6 +3136,7 @@ struct qla_hw_data { #define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha)) #define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) #define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha)) +#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) /* HBA serial number */ uint8_t serial0; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index ab22ccf4c7d3..1b917711cda4 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2196,6 +2196,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha) mid_init_cb->options = __constant_cpu_to_le16(BIT_1); mid_init_cb->init_cb.execution_throttle = cpu_to_le16(ha->fw_xcb_count); + /* D-Port Status */ + if (IS_DPORT_CAPABLE(ha)) + mid_init_cb->init_cb.firmware_options_1 |= + cpu_to_le16(BIT_7); } rval = qla2x00_init_firmware(vha, ha->init_cb_size); -- cgit From 2d5a4c344ad35eaaab59a5079eb6c62a843fb9d6 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Thu, 25 Sep 2014 05:16:55 -0400 Subject: qla2xxx: Disable laser for ISP2031 while unloading driver. Nameserver data on FC switch is not refreshed when qla2xxx driver is unloaded. Disabling laser for ISP2031 will force FC switch to rescan ports and clear fdmi entries from Nameserver. Signed-off-by: Himanshu Madhani Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_dbg.c | 2 +- drivers/scsi/qla2xxx/qla_def.h | 5 +++++ drivers/scsi/qla2xxx/qla_os.c | 31 +++++++++++++++++++++++++++++++ 3 files changed, 37 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 2d5610be2d70..b21adb3d5e67 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -11,7 +11,7 @@ * ---------------------------------------------------------------------- * | Level | Last Value Used | Holes | * ---------------------------------------------------------------------- - * | Module Init and Probe | 0x017d | 0x004b,0x0141 | + * | Module Init and Probe | 0x017d | 0x0141 | * | | | 0x0144,0x0146 | * | | | 0x015b-0x0160 | * | | | 0x016e-0x0170 | diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index c29f4653db18..d9daad7db6ce 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -191,6 +191,11 @@ * reset-recovery completion is * second */ +/* ISP2031: Values for laser on/off */ +#define PORT_0_2031 0x00201340 +#define PORT_1_2031 0x00201350 +#define LASER_ON_2031 0x01800100 +#define LASER_OFF_2031 0x01800180 /* * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 72b94f9ca637..e742890d5d1b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -240,6 +240,7 @@ static int qla2x00_change_queue_depth(struct scsi_device *, int, int); static int qla2x00_change_queue_type(struct scsi_device *, int); static void qla2x00_clear_drv_active(struct qla_hw_data *); static void qla2x00_free_device(scsi_qla_host_t *); +static void qla83xx_disable_laser(scsi_qla_host_t *vha); struct 
scsi_host_template qla2xxx_driver_template = { .module = THIS_MODULE, @@ -3177,6 +3178,10 @@ qla2x00_remove_one(struct pci_dev *pdev) qla84xx_put_chip(base_vha); + /* Laser should be disabled only for ISP2031 */ + if (IS_QLA2031(ha)) + qla83xx_disable_laser(base_vha); + /* Disable timer */ if (base_vha->timer_active) qla2x00_stop_timer(base_vha); @@ -5701,6 +5706,32 @@ qla2xxx_pci_resume(struct pci_dev *pdev) ha->flags.eeh_busy = 0; } +static void +qla83xx_disable_laser(scsi_qla_host_t *vha) +{ + uint32_t reg, data, fn; + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24; + + /* pci func #/port # */ + ql_dbg(ql_dbg_init, vha, 0x004b, + "Disabling Laser for hba: %p\n", vha); + + fn = (RD_REG_DWORD(&isp_reg->ctrl_status) & + (BIT_15|BIT_14|BIT_13|BIT_12)); + + fn = (fn >> 12); + + if (fn & 1) + reg = PORT_1_2031; + else + reg = PORT_0_2031; + + data = LASER_OFF_2031; + + qla83xx_wr_reg(vha, reg, data); +} + static const struct pci_error_handlers qla2xxx_err_handler = { .error_detected = qla2xxx_pci_error_detected, .mmio_enabled = qla2xxx_pci_mmio_enabled, -- cgit From 7473952e8de0eefed006bd415539f8161e87130d Mon Sep 17 00:00:00 2001 From: Nigel Kirkland Date: Thu, 25 Sep 2014 05:16:56 -0400 Subject: qla2xxx: Free sysfs attributes for ISP27xx. Signed-off-by: Nigel Kirkland Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_attr.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 677239f41ea5..9391e4bf1fbb 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -987,6 +987,8 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon) continue; if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw))) continue; + if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw)) + continue; sysfs_remove_bin_file(&host->shost_gendev.kobj, iter->attr); -- cgit From f3982d89317797f34d4996ab36d4408f956e7a7c Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Thu, 25 Sep 2014 05:16:57 -0400 Subject: qla2xxx: Force use of mailbox interface for flash access commands for ISP27xx. Signed-off-by: Chad Dupuis Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_init.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 1b917711cda4..51cabb953552 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1848,7 +1848,9 @@ enable_82xx_npiv: spin_unlock_irqrestore(&ha->hardware_lock, flags); } - if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) { + if (IS_QLA27XX(ha)) + ha->flags.fac_supported = 1; + else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) { uint32_t size; rval = qla81xx_fac_get_sector_size(vha, &size); -- cgit From 9a6e6400a9bef4a7fb11ccddf932e1868f566a73 Mon Sep 17 00:00:00 2001 From: Alex Vechersky Date: Thu, 25 Sep 2014 05:16:58 -0400 Subject: qla2xxx: Add missing ISP27xx checks to optrom code. 
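Both option ROM write paths -- qla2x00_sysfs_write_optrom_ctl() in qla_attr.c and qla2x00_optrom_setup() in qla_bsg.c -- validate the requested flash offset partly by board type. ISP27xx was missing from that board-type check, so otherwise-valid requests on those adapters fell into the !valid warning path; IS_QLA27XX(ha) is now included in both places.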
Signed-off-by: Alex Vechersky Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_attr.c | 3 ++- drivers/scsi/qla2xxx/qla_bsg.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 9391e4bf1fbb..82b92c414a9c 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -484,7 +484,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, start == (ha->flt_region_fw * 4)) valid = 1; else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) - || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) + || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) + || IS_QLA27XX(ha)) valid = 1; if (!valid) { ql_log(ql_log_warn, vha, 0x7065, diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 524f9eb7fcd1..2e2bb6f45ce6 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -1390,7 +1390,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha, start == (ha->flt_region_fw * 4)) valid = 1; else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || - IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) + IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) valid = 1; if (!valid) { ql_log(ql_log_warn, vha, 0x7058, -- cgit From e05fe2924892a6c3c23dc818e94ce80dbceb86b1 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Thu, 25 Sep 2014 05:16:59 -0400 Subject: qla2xxx: Honor FCP_RSP retry delay timer field. Parse the retry delay timer field from the FCP response data and if: - It is not zero - The SCSI status is busy or queue full return SCSI_MLQUEUE_TARGET_BUSY for the number of milliseconds specified in the retry delay timer field. Signed-off-by: Chad Dupuis Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_def.h | 2 ++ drivers/scsi/qla2xxx/qla_fw.h | 2 +- drivers/scsi/qla2xxx/qla_inline.h | 8 ++++++++ drivers/scsi/qla2xxx/qla_isr.c | 12 ++++++++++++ drivers/scsi/qla2xxx/qla_os.c | 9 +++++++++ 5 files changed, 32 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index d9daad7db6ce..d529510bbc6c 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2023,6 +2023,8 @@ typedef struct fc_port { unsigned long last_ramp_up; uint16_t port_id; + + unsigned long retry_delay_timestamp; } fc_port_t; #include "qla_mr.h" diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 7f2e1c71cc31..42bb357bf56b 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -567,7 +567,7 @@ struct sts_entry_24xx { #define SF_TRANSFERRED_DATA BIT_11 #define SF_FCP_RSP_DMA BIT_0 - uint16_t reserved_2; + uint16_t retry_delay; uint16_t scsi_status; /* SCSI status. 
*/ #define SS_CONFIRMATION_REQ BIT_12 diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index b3b1d6fc2d6c..fee9eb7c8a60 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -279,3 +279,11 @@ qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status) complete(&ha->mbx_intr_comp); } } + +static inline void +qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay) +{ + if (retry_delay) + fcport->retry_delay_timestamp = jiffies + + (retry_delay * HZ / 10); +} diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 550ffdf0bf17..f15f87e1abd8 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1983,6 +1983,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) int logit = 1; int res = 0; uint16_t state_flags = 0; + uint16_t retry_delay = 0; sts = (sts_entry_t *) pkt; sts24 = (struct sts_entry_24xx *) pkt; @@ -2076,6 +2077,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) host_to_fcp_swap(sts24->data, sizeof(sts24->data)); ox_id = le16_to_cpu(sts24->ox_id); par_sense_len = sizeof(sts24->data); + /* Valid values of the retry delay timer are 0x1-0xffef */ + if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) + retry_delay = sts24->retry_delay; } else { if (scsi_status & SS_SENSE_LEN_VALID) sense_len = le16_to_cpu(sts->req_sense_length); @@ -2109,6 +2113,14 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) scsi_status & SS_RESIDUAL_OVER) comp_status = CS_DATA_OVERRUN; + /* + * Check retry_delay_timer value if we receive a busy or + * queue full. + */ + if (lscsi_status == SAM_STAT_TASK_SET_FULL || + lscsi_status == SAM_STAT_BUSY) + qla2x00_set_retry_delay_timestamp(fcport, retry_delay); + /* * Based on Host and scsi status generate status code for Linux */ diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index e742890d5d1b..daabf8c9c298 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -731,6 +731,15 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) goto qc24_target_busy; } + /* + * Return target busy if we've received a non-zero retry_delay_timer + * in a FCP_RSP. + */ + if (time_after(jiffies, fcport->retry_delay_timestamp)) + fcport->retry_delay_timestamp = 0; + else + goto qc24_target_busy; + sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); if (!sp) goto qc24_host_busy; -- cgit From 2486c62765d73ad8f206ba43b8ede09e689bd803 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Thu, 25 Sep 2014 05:17:00 -0400 Subject: qla2xxx: Restore WWPN in case of Loop Dead. For FA-WWPN capable device, ISP2031 and ISP27XX, when loop dead is detected by a driver, restore WWPN from NVRAM. 
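The hunk that follows simply copies the factory port name held in the init control block back over the runtime port name when the loop goes down. As a rough stand-alone illustration of that idea (the struct and field names below are invented for the sketch, not the driver's own):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define WWN_SIZE 8  /* a Fibre Channel WWPN is 8 bytes */

/* Hypothetical stand-ins for the driver structures. */
struct hba_state {
	uint8_t nvram_port_name[WWN_SIZE]; /* factory WWPN from NVRAM/init cb */
	uint8_t port_name[WWN_SIZE];       /* runtime WWPN, possibly fabric assigned */
	int fawwpn_enabled;                /* FA-WWPN support negotiated with firmware */
};

/* On a loop-down event, fall back to the factory WWPN. */
static void loop_down_restore_wwpn(struct hba_state *hba)
{
	if (hba->fawwpn_enabled)
		memcpy(hba->port_name, hba->nvram_port_name, WWN_SIZE);
}

int main(void)
{
	struct hba_state hba = {
		.nvram_port_name = { 0x21, 0x00, 0x00, 0x24, 0xff, 0x01, 0x02, 0x03 },
		.port_name       = { 0x50, 0x06, 0x01, 0x60, 0xaa, 0xbb, 0xcc, 0xdd },
		.fawwpn_enabled  = 1,
	};

	loop_down_restore_wwpn(&hba);
	for (int i = 0; i < WWN_SIZE; i++)
		printf("%02x", hba.port_name[i]);
	printf("\n");
	return 0;
}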
Signed-off-by: Himanshu Madhani Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_dbg.c | 3 +-- drivers/scsi/qla2xxx/qla_def.h | 3 ++- drivers/scsi/qla2xxx/qla_init.c | 5 +++++ drivers/scsi/qla2xxx/qla_isr.c | 10 ++++++++++ 4 files changed, 18 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index b21adb3d5e67..5f25cf0db568 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -11,8 +11,7 @@ * ---------------------------------------------------------------------- * | Level | Last Value Used | Holes | * ---------------------------------------------------------------------- - * | Module Init and Probe | 0x017d | 0x0141 | - * | | | 0x0144,0x0146 | + * | Module Init and Probe | 0x017d | 0x0144,0x0146 | * | | | 0x015b-0x0160 | * | | | 0x016e-0x0170 | * | Mailbox commands | 0x118d | 0x1115-0x1116 | diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index d529510bbc6c..a3bab3a3efdd 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2945,7 +2945,8 @@ struct qla_hw_data { uint32_t mr_reset_hdlr_active:1; uint32_t mr_intr_valid:1; - /* 34 bits */ + uint32_t fawwpn_enabled:1; + /* 35 bits */ } flags; /* This spinlock is used to protect "io transactions", you must diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 51cabb953552..23f12dddcb4a 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2202,6 +2202,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha) if (IS_DPORT_CAPABLE(ha)) mid_init_cb->init_cb.firmware_options_1 |= cpu_to_le16(BIT_7); + /* Enable FA-WWPN */ + ha->flags.fawwpn_enabled = + (mid_init_cb->init_cb.firmware_options_1 & BIT_6) ? 1 : 0; + ql_dbg(ql_dbg_init, vha, 0x0141, "FA-WWPN Support: %s.\n", + (ha->flags.fawwpn_enabled) ? "enabled" : "disabled"); } rval = qla2x00_init_firmware(vha, ha->init_cb_size); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index f15f87e1abd8..696e4a2d3fa7 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -752,6 +752,16 @@ skip_rio: if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + /* + * In case of loop down, restore WWPN from + * NVRAM in case of FA-WWPN capable ISP + */ + if (ha->flags.fawwpn_enabled) { + void *wwpn = ha->init_cb->port_name; + + memcpy(vha->port_name, wwpn, WWN_SIZE); + } + vha->device_flags |= DFLG_NO_CABLE; qla2x00_mark_all_devices_lost(vha, 1); } -- cgit From ef86cb2059a14b4024c7320999ee58e938873032 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Thu, 25 Sep 2014 05:17:01 -0400 Subject: qla2xxx: Mark port lost when we receive an RSCN for it. 
Signed-off-by: Chad Dupuis Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_isr.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 696e4a2d3fa7..a04a1b1f7f32 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -575,8 +575,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; - uint32_t rscn_entry, host_pid; + uint32_t rscn_entry, host_pid, tmp_pid; unsigned long flags; + fc_port_t *fcport = NULL; /* Setup to process RIO completion. */ handle_cnt = 0; @@ -979,6 +980,20 @@ skip_rio: if (qla2x00_is_a_vp_did(vha, rscn_entry)) break; + /* + * Search for the rport related to this RSCN entry and mark it + * as lost. + */ + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (atomic_read(&fcport->state) != FCS_ONLINE) + continue; + tmp_pid = fcport->d_id.b24; + if (fcport->d_id.b24 == rscn_entry) { + qla2x00_mark_device_lost(vha, fcport, 0, 0); + break; + } + } + atomic_set(&vha->loop_down_timer, 0); vha->flags.management_server_logged_in = 0; -- cgit From 61d41f610debd557b1c451338fa0afaac9dd8719 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Thu, 25 Sep 2014 05:17:02 -0400 Subject: qla2xxx: Disable PCI device in shutdown handler. Disable the PCI device during shutdown to prevent any races with other PCI code such as the AER handling code. Signed-off-by: Chad Dupuis Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_os.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index daabf8c9c298..2c3c6afe88a3 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3032,6 +3032,9 @@ qla2x00_shutdown(struct pci_dev *pdev) qla2x00_free_irqs(vha); qla2x00_free_fw_dump(ha); + + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); } /* Deletes all the virtual ports for a given ha */ -- cgit From f261f7aff7badc76b266c6df799446ee24313449 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Thu, 25 Sep 2014 05:17:03 -0400 Subject: qla2xxx: Fail adapter initialization on load ram failure. If we fail to load one of the segments of firmware fail the operation instead of trying to continue which will lead to a system crash since the pointers to the next segment will not be updated correctly. 
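In other words, the change turns a silent break out of the segment loop into a hard failure so initialization stops at the first bad segment. A minimal stand-alone sketch of that control-flow change, with a stubbed loader and illustrative names:

#include <stdio.h>

#define QLA_SUCCESS          0
#define QLA_FUNCTION_FAILED  1

/* Stub for the real segment loader; pretend segment 2 fails. */
static int load_segment(int fragment)
{
	return (fragment == 2) ? QLA_FUNCTION_FAILED : QLA_SUCCESS;
}

/* Abort on the first failed segment instead of breaking out and
 * continuing with stale offsets, which is what the patch above fixes. */
static int load_firmware(int nsegments)
{
	for (int fragment = 0; fragment < nsegments; fragment++) {
		if (load_segment(fragment) != QLA_SUCCESS) {
			fprintf(stderr, "Failed to load segment %d of firmware.\n", fragment);
			return QLA_FUNCTION_FAILED;   /* was: break; */
		}
	}
	return QLA_SUCCESS;
}

int main(void)
{
	return load_firmware(4) == QLA_SUCCESS ? 0 : 1;
}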
Signed-off-by: Chad Dupuis Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 23f12dddcb4a..1d66f2fae881 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -5236,7 +5236,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, ql_log(ql_log_fatal, vha, 0x008f, "Failed to load segment %d of firmware.\n", fragment); - break; + return QLA_FUNCTION_FAILED; } faddr += dlen; @@ -5539,7 +5539,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) ql_log(ql_log_fatal, vha, 0x0098, "Failed to load segment %d of firmware.\n", fragment); - break; + return QLA_FUNCTION_FAILED; } fwcode += dlen; -- cgit From 7ab3d962085f8b4953aca7517611bd3d164eb323 Mon Sep 17 00:00:00 2001 From: Sawan Chandak Date: Thu, 25 Sep 2014 05:17:04 -0400 Subject: qla2xxx: Move warning message to debug level. Signed-off-by: Sawan Chandak Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_nx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 25626006baff..54cb2ac9339b 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -857,7 +857,7 @@ qla82xx_rom_lock(struct qla_hw_data *ha) break; if (timeout >= qla82xx_rom_lock_timeout) { lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); - ql_log(ql_log_warn, vha, 0xb157, + ql_dbg(ql_dbg_p3p, vha, 0xb157, "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d", __func__, ha->portnum, lock_owner); return -1; -- cgit From 6d78e5576b9450ad795577c1a31b20aafad4dd71 Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Thu, 25 Sep 2014 05:17:05 -0400 Subject: qla2xxx: Fix sparse warning in qla_iocb.c file. Signed-off-by: Joe Carnuccio Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_iocb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 150529d98db4..6a6867d24d9f 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2648,7 +2648,7 @@ queuing_error: return QLA_FUNCTION_FAILED; } -void +static void qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) { struct srb_iocb *aio = &sp->u.iocb_cmd; -- cgit From 1b5fd56466967d0092ba489e45343d7c3317ed95 Mon Sep 17 00:00:00 2001 From: Saurav Kashyap Date: Thu, 25 Sep 2014 05:17:06 -0400 Subject: qla2xxx: Update the driver version to 8.07.00.16-k. 
Signed-off-by: Giridhar Malavali Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 4d2c98cbec4f..d88b86214ec5 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.07.00.08-k" +#define QLA2XXX_VERSION "8.07.00.16-k" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 7 -- cgit From 4d6609c47373ce85ed887ae471b34fb188f9c2b8 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Thu, 25 Sep 2014 06:14:43 -0400 Subject: qla2xxx: Fix sparse warnings in tcm_qla2xxx.c Signed-off-by: Himanshu Madhani Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index e2beab962096..09d2931774f2 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -50,8 +50,12 @@ #include "qla_target.h" #include "tcm_qla2xxx.h" -struct workqueue_struct *tcm_qla2xxx_free_wq; -struct workqueue_struct *tcm_qla2xxx_cmd_wq; +static struct workqueue_struct *tcm_qla2xxx_free_wq; +static struct workqueue_struct *tcm_qla2xxx_cmd_wq; + +/* Local pointer to allocated TCM configfs fabric module */ +static struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; +static struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; /* * Parse WWN. @@ -734,10 +738,6 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) cmd->sg_mapped = 0; } -/* Local pointer to allocated TCM configfs fabric module */ -struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; -struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; - static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *); /* -- cgit From 78c2106a50e067f7168ee8c0944baaeb0e988272 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Thu, 25 Sep 2014 06:14:44 -0400 Subject: qla2xxx: fix kernel NULL pointer access This patch fixes a regression added by commit 51a07f84649d2be206c4c2ad9a612956db0c2f8c. When allocating memory for a new session, the original patch does not assign vha to op->vha, resulting in a NULL pointer access during qlt_create_sess_from_atio().
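The underlying bug class is a deferred-work item queued with one field left unset, which the work handler later dereferences. A small user-space model of the initialize-before-queue rule the fix enforces (types and names here are illustrative only, not the driver's):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct host { int id; };           /* stand-in for scsi_qla_host */
struct atio { char payload[64]; }; /* stand-in for the ATIO frame */

/* Deferred-work item: every field the handler touches must be set
 * before the item is queued, including the back-pointer to the host. */
struct sess_op {
	struct host *vha;
	struct atio  atio;
};

static struct sess_op *prepare_sess_op(struct host *vha, const struct atio *atio)
{
	struct sess_op *op = calloc(1, sizeof(*op));

	if (!op)
		return NULL;
	memcpy(&op->atio, atio, sizeof(*atio));
	op->vha = vha;      /* the missing assignment added by the patch */
	return op;
}

int main(void)
{
	struct host vha = { .id = 0 };
	struct atio a = { .payload = "example" };
	struct sess_op *op = prepare_sess_op(&vha, &a);

	printf("op->vha set: %s\n", op && op->vha ? "yes" : "no");
	free(op);
	return 0;
}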
Cc: Signed-off-by: Himanshu Madhani Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_target.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index aebe62c9246d..43f91778371b 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -3277,6 +3277,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, return -ENOMEM; memcpy(&op->atio, atio, sizeof(*atio)); + op->vha = vha; INIT_WORK(&op->work, qlt_create_sess_from_atio); queue_work(qla_tgt_wq, &op->work); return 0; -- cgit From 75554b68ac1e018bca00d68a430b92ada8ab52dd Mon Sep 17 00:00:00 2001 From: Arun Easi Date: Thu, 25 Sep 2014 06:14:45 -0400 Subject: qla2xxx: Use correct offset to req-q-out for reserve calculation Cc: Signed-off-by: Arun Easi Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_target.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 43f91778371b..2fe6da3c03bb 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1431,12 +1431,10 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha, static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, uint32_t req_cnt) { - struct qla_hw_data *ha = vha->hw; - device_reg_t __iomem *reg = ha->iobase; uint32_t cnt; if (vha->req->cnt < (req_cnt + 2)) { - cnt = (uint16_t)RD_REG_DWORD(®->isp24.req_q_out); + cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out); ql_dbg(ql_dbg_tgt, vha, 0xe00a, "Request ring circled: cnt=%d, vha->->ring_index=%d, " -- cgit From 940070374441fc78ad6d994f6ce67d5c6646e6f6 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Thu, 25 Sep 2014 06:14:46 -0400 Subject: qla2xxx: Increase room in request queue for sending priority packets Signed-off-by: Himanshu Madhani Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_iocb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 6a6867d24d9f..c787847b3a46 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1901,7 +1901,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp) skip_cmd_array: /* Check for room on request queue. */ - if (req->cnt < req_cnt) { + if (req->cnt < req_cnt + 2) { if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) cnt = RD_REG_DWORD(®->isp25mq.req_q_out); else if (IS_P3P_TYPE(ha)) @@ -1920,7 +1920,7 @@ skip_cmd_array: req->cnt = req->length - (req->ring_index - cnt); } - if (req->cnt < req_cnt) + if (req->cnt < req_cnt + 2) goto queuing_error; /* Prep packet */ -- cgit From 667024a36549186ce334c1027e434b45cf2c943d Mon Sep 17 00:00:00 2001 From: Arun Easi Date: Thu, 25 Sep 2014 06:14:47 -0400 Subject: qla2xxx: Remove verbose messages in target mode. Turning logging bits for target mode ON dumps quite a lot verbose messages, remove those and change some of the IO path logging to use IO bits. 
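The reclassified prints end up gated by the I/O debug bit instead of the always-chatty target bit. A rough sketch of that mask-gated logging pattern, with made-up bit names standing in for the ql_dbg_* masks:

#include <stdio.h>
#include <stdarg.h>
#include <stdint.h>

/* Hypothetical debug-mask bits, mirroring the ql_dbg_* idea. */
#define DBG_TGT  0x00000001u
#define DBG_IO   0x00000002u

static uint32_t debug_mask = DBG_IO;   /* set via a module parameter in the driver */

/* Print only when the message's category bit is enabled. */
static void dbg_printf(uint32_t category, const char *fmt, ...)
{
	va_list ap;

	if (!(debug_mask & category))
		return;
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	dbg_printf(DBG_TGT, "verbose target-mode trace, suppressed\n");
	dbg_printf(DBG_IO, "I/O path event: no room in request ring\n");
	return 0;
}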
Signed-off-by: Arun Easi Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_target.c | 154 ++++---------------------------------- 1 file changed, 16 insertions(+), 138 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 2fe6da3c03bb..1503cf4120cf 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1397,8 +1397,6 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) } } - ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n", - prm->seg_cnt, prm->req_cnt); return 0; out_err: @@ -1436,10 +1434,6 @@ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, if (vha->req->cnt < (req_cnt + 2)) { cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out); - ql_dbg(ql_dbg_tgt, vha, 0xe00a, - "Request ring circled: cnt=%d, vha->->ring_index=%d, " - "vha->req->cnt=%d, req_cnt=%d\n", cnt, - vha->req->ring_index, vha->req->cnt, req_cnt); if (vha->req->ring_index < cnt) vha->req->cnt = cnt - vha->req->ring_index; else @@ -1448,7 +1442,7 @@ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, } if (unlikely(vha->req->cnt < (req_cnt + 2))) { - ql_dbg(ql_dbg_tgt, vha, 0xe00b, + ql_dbg(ql_dbg_io, vha, 0x305a, "qla_target(%d): There is no room in the " "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, " "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index, @@ -1489,7 +1483,7 @@ static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha) if (h > DEFAULT_OUTSTANDING_COMMANDS) h = 1; /* 0 is QLA_TGT_NULL_HANDLE */ if (h == ha->tgt.current_handle) { - ql_dbg(ql_dbg_tgt, vha, 0xe04e, + ql_dbg(ql_dbg_io, vha, 0x305b, "qla_target(%d): Ran out of " "empty cmd slots in ha %p\n", vha->vp_idx, ha); h = QLA_TGT_NULL_HANDLE; @@ -1546,9 +1540,6 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, pkt->u.status0.ox_id = cpu_to_le16(temp); pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); - ql_dbg(ql_dbg_tgt, vha, 0xe00c, - "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n", - vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp); return 0; } @@ -1606,14 +1597,6 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm, } *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); - ql_dbg(ql_dbg_tgt, vha, 0xe00d, - "S/G Segment Cont. 
phys_addr=%llx:%llx, len=%d\n", - (long long unsigned int) - pci_dma_hi32(sg_dma_address(prm->sg)), - (long long unsigned int) - pci_dma_lo32(sg_dma_address(prm->sg)), - (int)sg_dma_len(prm->sg)); - prm->sg = sg_next(prm->sg); } } @@ -1631,11 +1614,6 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm, int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr; struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt; - ql_dbg(ql_dbg_tgt, vha, 0xe00e, - "iocb->scsi_status=%x, iocb->flags=%x\n", - le16_to_cpu(pkt24->u.status0.scsi_status), - le16_to_cpu(pkt24->u.status0.flags)); - pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen); /* Setup packet address segment pointer */ @@ -1653,7 +1631,6 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm, } /* If scatter gather */ - ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments..."); /* Load command entry data segments */ for (cnt = 0; @@ -1668,14 +1645,6 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm, } *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); - ql_dbg(ql_dbg_tgt, vha, 0xe010, - "S/G Segment phys_addr=%llx:%llx, len=%d\n", - (long long unsigned int)pci_dma_hi32(sg_dma_address( - prm->sg)), - (long long unsigned int)pci_dma_lo32(sg_dma_address( - prm->sg)), - (int)sg_dma_len(prm->sg)); - prm->sg = sg_next(prm->sg); } @@ -1713,10 +1682,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED; } - ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n", - vha->vp_idx, cmd->tag, - be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); - prm->cmd = cmd; prm->tgt = tgt; prm->rq_result = scsi_status; @@ -1727,15 +1692,10 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, prm->req_cnt = 1; prm->add_status_pkt = 0; - ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n", - prm->rq_result, xmit_type); - /* Send marker if required */ if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) return -EFAULT; - ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx); - if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) { if (qlt_pci_map_calc_cnt(prm) != 0) return -EAGAIN; @@ -1745,7 +1705,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { prm->residual = se_cmd->residual_count; - ql_dbg(ql_dbg_tgt, vha, 0xe014, + ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c, "Residual underflow: %d (tag %d, " "op %x, bufflen %d, rq_result %x)\n", prm->residual, cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, @@ -1753,7 +1713,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, prm->rq_result |= SS_RESIDUAL_UNDER; } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { prm->residual = se_cmd->residual_count; - ql_dbg(ql_dbg_tgt, vha, 0xe015, + ql_dbg(ql_dbg_io, vha, 0x305d, "Residual overflow: %d (tag %d, " "op %x, bufflen %d, rq_result %x)\n", prm->residual, cmd->tag, se_cmd->t_task_cdb ? 
se_cmd->t_task_cdb[0] : 0, @@ -1776,10 +1736,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, } } - ql_dbg(ql_dbg_tgt, vha, 0xe016, - "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n", - prm->req_cnt, *full_req_cnt, prm->add_status_pkt); - return 0; } @@ -2356,8 +2312,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, struct ctio7_to_24xx *ctio = (struct ctio7_to_24xx *)qlt_get_req_pkt(vha); - ql_dbg(ql_dbg_tgt, vha, 0xe019, - "Building additional status packet\n"); + ql_dbg(ql_dbg_io, vha, 0x305e, + "Building additional status packet 0x%p.\n", + ctio); /* * T10Dif: ctio_crc2_to_fw overlay ontop of @@ -2390,10 +2347,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ - ql_dbg(ql_dbg_tgt, vha, 0xe01a, - "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n", - pkt, scsi_status); - qla2x00_start_iocbs(vha, vha->req); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -2428,11 +2381,6 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) return -EIO; - ql_dbg(ql_dbg_tgt, vha, 0xe01b, - "%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n", - __func__, (int)vha->vp_idx, &cmd->se_cmd, - be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); - /* Calculate number of entries and segments required */ if (qlt_pci_map_calc_cnt(&prm) != 0) return -EAGAIN; @@ -2861,11 +2809,9 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, CTIO_INTERMEDIATE_HANDLE_MARK); if (handle != QLA_TGT_NULL_HANDLE) { - if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) { - ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s", - "SKIP_HANDLE CTIO\n"); + if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) return NULL; - } + /* handle-1 is actually used */ if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) { ql_dbg(ql_dbg_tgt, vha, 0xe052, @@ -2903,10 +2849,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, struct target_core_fabric_ops *tfo; struct qla_tgt_cmd *cmd; - ql_dbg(ql_dbg_tgt, vha, 0xe01e, - "qla_target(%d): handle(ctio %p status %#x) <- %08x\n", - vha->vp_idx, ctio, status, handle); - if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { /* That could happen only in case of an error/reset/abort */ if (status != CTIO_SUCCESS) { @@ -3017,7 +2959,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, skip_term: if (cmd->state == QLA_TGT_STATE_PROCESSED) { - ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); + ; } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { int rx_status = 0; @@ -3028,10 +2970,6 @@ skip_term: else cmd->write_data_transferred = 1; - ql_dbg(ql_dbg_tgt, vha, 0xe020, - "Data received, context %x, rx_status %d\n", - 0x0, rx_status); - ha->tgt.tgt_ops->handle_data(cmd); return; } else if (cmd->state == QLA_TGT_STATE_ABORTED) { @@ -3126,11 +3064,6 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) &atio->u.isp24.fcp_cmnd.add_cdb[ atio->u.isp24.fcp_cmnd.add_cdb_len])); - ql_dbg(ql_dbg_tgt, vha, 0xe022, - "qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n", - cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length, - cmd->atio.u.isp24.fcp_hdr.ox_id); - ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, fcp_task_attr, data_dir, bidi); if (ret != 0) @@ -3144,7 +3077,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) return; out_term: - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd); + ql_dbg(ql_dbg_io, vha, 0x3060, 
"Terminating work cmd %p", cmd); /* * cmd has not sent to target yet, so pass NULL as the second * argument to qlt_send_term_exchange() and free the memory here. @@ -3262,7 +3195,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd; if (unlikely(tgt->tgt_stop)) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021, + ql_dbg(ql_dbg_io, vha, 0x3061, "New command while device %p is shutting down\n", tgt); return -EFAULT; } @@ -3287,7 +3220,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, cmd = qlt_get_tag(vha, sess, atio); if (!cmd) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e, + ql_dbg(ql_dbg_io, vha, 0x3062, "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); ha->tgt.tgt_ops->put_sess(sess); return -ENOMEM; @@ -4138,7 +4071,7 @@ static void qlt_send_busy(struct scsi_qla_host *vha, pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); if (!pkt) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e, + ql_dbg(ql_dbg_io, vha, 0x3063, "qla_target(%d): %s failed: unable to allocate " "request packet", vha->vp_idx, __func__); return; @@ -4185,14 +4118,10 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, int rc; if (unlikely(tgt == NULL)) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039, + ql_dbg(ql_dbg_io, vha, 0x3064, "ATIO pkt, but no tgt (ha %p)", ha); return; } - ql_dbg(ql_dbg_tgt, vha, 0xe02c, - "qla_target(%d): ATIO pkt %p: type %02x count %02x", - vha->vp_idx, atio, atio->u.raw.entry_type, - atio->u.raw.entry_count); /* * In tgt_stop mode we also should allow all requests to pass. * Otherwise, some commands can stuck. @@ -4202,23 +4131,9 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, switch (atio->u.raw.entry_type) { case ATIO_TYPE7: - ql_dbg(ql_dbg_tgt, vha, 0xe02d, - "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n", - vha->vp_idx, atio->u.isp24.fcp_cmnd.lun, - atio->u.isp24.fcp_cmnd.rddata, - atio->u.isp24.fcp_cmnd.wrdata, - atio->u.isp24.fcp_cmnd.cdb[0], - atio->u.isp24.fcp_cmnd.add_cdb_len, - be32_to_cpu(get_unaligned((uint32_t *) - &atio->u.isp24.fcp_cmnd.add_cdb[ - atio->u.isp24.fcp_cmnd.add_cdb_len])), - atio->u.isp24.fcp_hdr.s_id[0], - atio->u.isp24.fcp_hdr.s_id[1], - atio->u.isp24.fcp_hdr.s_id[2]); - if (unlikely(atio->u.isp24.exchange_addr == ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { - ql_dbg(ql_dbg_tgt, vha, 0xe058, + ql_dbg(ql_dbg_io, vha, 0x3065, "qla_target(%d): ATIO_TYPE7 " "received with UNKNOWN exchange address, " "sending QUEUE_FULL\n", vha->vp_idx); @@ -4292,11 +4207,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) return; } - ql_dbg(ql_dbg_tgt, vha, 0xe02f, - "qla_target(%d): response pkt %p: T %02x C %02x S %02x " - "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type, - pkt->entry_count, pkt->entry_status, pkt->handle); - /* * In tgt_stop mode we also should allow all requests to pass. * Otherwise, some commands can stuck. 
@@ -4309,9 +4219,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) case CTIO_TYPE7: { struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; - ql_dbg(ql_dbg_tgt, vha, 0xe030, - "CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n", - entry->entry_type, vha->vp_idx); qlt_do_ctio_completion(vha, entry->handle, le16_to_cpu(entry->status)|(pkt->entry_status << 16), entry); @@ -4322,15 +4229,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) { struct atio_from_isp *atio = (struct atio_from_isp *)pkt; int rc; - ql_dbg(ql_dbg_tgt, vha, 0xe031, - "ACCEPT_TGT_IO instance %d status %04x " - "lun %04x read/write %d data_length %04x " - "target_id %02x rx_id %04x\n ", vha->vp_idx, - le16_to_cpu(atio->u.isp2x.status), - le16_to_cpu(atio->u.isp2x.lun), - atio->u.isp2x.execution_codes, - le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha, - atio), atio->u.isp2x.rx_id); if (atio->u.isp2x.status != __constant_cpu_to_le16(ATIO_CDB_VALID)) { ql_dbg(ql_dbg_tgt, vha, 0xe05e, @@ -4339,10 +4237,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) le16_to_cpu(atio->u.isp2x.status)); break; } - ql_dbg(ql_dbg_tgt, vha, 0xe032, - "FCP CDB: 0x%02x, sizeof(cdb): %lu", - atio->u.isp2x.cdb[0], (unsigned long - int)sizeof(atio->u.isp2x.cdb)); rc = qlt_handle_cmd_for_atio(vha, atio); if (unlikely(rc != 0)) { @@ -4375,8 +4269,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) case CONTINUE_TGT_IO_TYPE: { struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; - ql_dbg(ql_dbg_tgt, vha, 0xe033, - "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx); qlt_do_ctio_completion(vha, entry->handle, le16_to_cpu(entry->status)|(pkt->entry_status << 16), entry); @@ -4386,8 +4278,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) case CTIO_A64_TYPE: { struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; - ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n", - vha->vp_idx); qlt_do_ctio_completion(vha, entry->handle, le16_to_cpu(entry->status)|(pkt->entry_status << 16), entry); @@ -4491,11 +4381,6 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; int login_code; - ql_dbg(ql_dbg_tgt, vha, 0xe039, - "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n", - vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done, - ha->operating_mode, ha->current_topology); - if (!ha->tgt.tgt_ops) return; @@ -4572,11 +4457,6 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, break; default: - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040, - "qla_target(%d): Async event %#x occurred: " - "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, - code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), - le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); break; } @@ -4597,8 +4477,6 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, return NULL; } - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id); - fcport->loop_id = loop_id; rc = qla2x00_get_port_database(vha, fcport, 0); -- cgit From d154f35029c1f77b589389d2afb1e1766fb80c28 Mon Sep 17 00:00:00 2001 From: Arun Easi Date: Thu, 25 Sep 2014 06:14:48 -0400 Subject: qla2xxx: Enable SLER conditionally in target mode. Sequence level error recovery (aka FC Tape) is not really required for disk devices. On heavily loaded system, with slow turn around, a bunch of status enquiries using REC puts additional burden to the target, so just turn off SLER by default. 
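The mechanism is a module parameter that decides whether BIT_12 of firmware_options_2 is set or cleared before the NVRAM configuration is handed to the firmware. A stand-alone sketch of the bit handling (the little-endian conversion done in the driver is omitted, names are illustrative):

#include <stdio.h>
#include <stdint.h>

#define FW_OPT2_FC_TAPE (1u << 12)   /* BIT_12: sequence level error recovery */

/* Models the ql2xtgt_tape_enable module parameter (default off). */
static int tgt_tape_enable = 0;

static uint32_t configure_fw_options_2(uint32_t firmware_options_2)
{
	if (tgt_tape_enable)
		firmware_options_2 |= FW_OPT2_FC_TAPE;   /* enable FC Tape / SLER */
	else
		firmware_options_2 &= ~FW_OPT2_FC_TAPE;  /* disabled by default */
	return firmware_options_2;
}

int main(void)
{
	uint32_t opts = 0xffffffffu;

	opts = configure_fw_options_2(opts);
	printf("BIT_12 is %s\n", (opts & FW_OPT2_FC_TAPE) ? "set" : "clear");
	return 0;
}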
Signed-off-by: Arun Easi Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_target.c | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 1503cf4120cf..85587fcd5a38 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -42,6 +42,11 @@ #include "qla_def.h" #include "qla_target.h" +static int ql2xtgt_tape_enable; +module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xtgt_tape_enable, + "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER."); + static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED; module_param(qlini_mode, charp, S_IRUGO); MODULE_PARM_DESC(qlini_mode, @@ -5172,8 +5177,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); /* Enable initial LIP */ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9); - /* Enable FC tapes support */ - nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); + if (ql2xtgt_tape_enable) + /* Enable FC Tape support */ + nv->firmware_options_2 |= cpu_to_le32(BIT_12); + else + /* Disable FC Tape support */ + nv->firmware_options_2 &= cpu_to_le32(~BIT_12); + /* Disable Full Login after LIP */ nv->host_p &= __constant_cpu_to_le32(~BIT_10); /* Enable target PRLI control */ @@ -5255,8 +5265,13 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); /* Enable initial LIP */ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9); - /* Enable FC tapes support */ - nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); + if (ql2xtgt_tape_enable) + /* Enable FC tape support */ + nv->firmware_options_2 |= cpu_to_le32(BIT_12); + else + /* Disable FC tape support */ + nv->firmware_options_2 &= cpu_to_le32(~BIT_12); + /* Disable Full Login after LIP */ nv->host_p &= __constant_cpu_to_le32(~BIT_10); /* Enable target PRLI control */ -- cgit From d29fb7360ee61ec02257e9c49848c9d2a05345a0 Mon Sep 17 00:00:00 2001 From: Saurav Kashyap Date: Thu, 25 Sep 2014 06:14:49 -0400 Subject: qla2xxx: Add counter for message Signed-off-by: Saurav Kashyap Signed-off-by: Giridhar Malavali Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_target.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 85587fcd5a38..1a546bc2bdd4 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1434,10 +1434,11 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha, static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, uint32_t req_cnt) { - uint32_t cnt; + uint32_t cnt, cnt_in; if (vha->req->cnt < (req_cnt + 2)) { cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out); + cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in); if (vha->req->ring_index < cnt) vha->req->cnt = cnt - vha->req->ring_index; @@ -1448,10 +1449,9 @@ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, if (unlikely(vha->req->cnt < (req_cnt + 2))) { ql_dbg(ql_dbg_io, vha, 0x305a, - "qla_target(%d): There is no room in the " - "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, " - "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index, - vha->req->cnt, req_cnt); + "qla_target(%d): There is no room in the request ring: 
vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n", + vha->vp_idx, vha->req->ring_index, + vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length); return -EAGAIN; } vha->req->cnt -= req_cnt; -- cgit From dd9c4eff77fc7412fa79a1a05a2dec2469cc8ca8 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Thu, 25 Sep 2014 06:14:50 -0400 Subject: qla2xxx: fix crash due to task mgmt cmd type tcm_qla2xxx_get_task_tag incorrectly assume command is qla_tgt_cmd. Add check to see if it's a scsi cmd or task mgmt command. Signed-off-by: Quinn Tran Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 09d2931774f2..9f954074acd9 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -515,8 +515,13 @@ static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl) static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd) { - struct qla_tgt_cmd *cmd = container_of(se_cmd, - struct qla_tgt_cmd, se_cmd); + struct qla_tgt_cmd *cmd; + + /* check for task mgmt cmd */ + if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) + return 0xffffffff; + + cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); return cmd->tag; } -- cgit From c0cb44967b4a934252091557c8e46345f7ef7259 Mon Sep 17 00:00:00 2001 From: Arun Easi Date: Thu, 25 Sep 2014 06:14:51 -0400 Subject: qla2xxx: Add Host reset handling in target mode. Signed-off-by: Arun Easi Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_dbg.c | 2 +- drivers/scsi/qla2xxx/qla_gbl.h | 1 + drivers/scsi/qla2xxx/qla_os.c | 2 ++ drivers/scsi/qla2xxx/qla_target.c | 74 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 78 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 5f25cf0db568..83d47ab3fd1e 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -68,7 +68,7 @@ * | | | 0xd101-0xd1fe | * | | | 0xd214-0xd2fe | * | Target Mode | 0xe078 | | - * | Target Mode Management | 0xf072 | 0xf002-0xf003 | + * | Target Mode Management | 0xf072 | 0xf002 | * | | | 0xf046-0xf049 | * | Target Mode Task Management | 0x1000b | | * ---------------------------------------------------------------------- diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 0f851deec0e8..42ea4477c2ec 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -766,4 +766,5 @@ extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t); extern int qla8044_abort_isp(scsi_qla_host_t *); extern int qla8044_check_fw_alive(struct scsi_qla_host *); +extern void qlt_host_reset_handler(struct qla_hw_data *ha); #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 2c3c6afe88a3..08e5bbb12f0b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1363,6 +1363,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) struct qla_hw_data *ha = vha->hw; struct req_que *req; + qlt_host_reset_handler(ha); + spin_lock_irqsave(&ha->hardware_lock, flags); for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 1a546bc2bdd4..de45126ade87 100644 --- a/drivers/scsi/qla2xxx/qla_target.c 
+++ b/drivers/scsi/qla2xxx/qla_target.c @@ -2843,6 +2843,80 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, return cmd; } +/* hardware_lock should be held by caller. */ +static void +qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t handle; + + if (cmd->sg_mapped) + qlt_unmap_sg(vha, cmd); + + handle = qlt_make_handle(vha); + + /* TODO: fix debug message type and ids. */ + if (cmd->state == QLA_TGT_STATE_PROCESSED) { + ql_dbg(ql_dbg_io, vha, 0xff00, + "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle); + } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { + cmd->write_data_transferred = 0; + cmd->state = QLA_TGT_STATE_DATA_IN; + + ql_dbg(ql_dbg_io, vha, 0xff01, + "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle); + + ha->tgt.tgt_ops->handle_data(cmd); + return; + } else if (cmd->state == QLA_TGT_STATE_ABORTED) { + ql_dbg(ql_dbg_io, vha, 0xff02, + "HOST-ABORT: handle=%d, state=ABORTED.\n", handle); + } else { + ql_dbg(ql_dbg_io, vha, 0xff03, + "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle, + cmd->state); + dump_stack(); + } + + ha->tgt.tgt_ops->free_cmd(cmd); +} + +void +qlt_host_reset_handler(struct qla_hw_data *ha) +{ + struct qla_tgt_cmd *cmd; + unsigned long flags; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + scsi_qla_host_t *vha = NULL; + struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt; + uint32_t i; + + if (!base_vha->hw->tgt.tgt_ops) + return; + + if (!tgt || qla_ini_mode_enabled(base_vha)) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003, + "Target mode disabled\n"); + return; + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10, + "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n", + base_vha->dpc_flags); + + spin_lock_irqsave(&ha->hardware_lock, flags); + for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) { + cmd = qlt_get_cmd(base_vha, i); + if (!cmd) + continue; + /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */ + vha = cmd->vha; + qlt_abort_cmd_on_host_reset(vha, cmd); + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + + /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire */ -- cgit From b6a029e1b0aba337ee54de66b1e428dd3bb624ca Mon Sep 17 00:00:00 2001 From: Arun Easi Date: Thu, 25 Sep 2014 06:14:52 -0400 Subject: qla2xxx: Handle chip reset in target mode. A chip reset can occur after driver submits command to the stack. Abort command processing if a chip reset has occurred or in progress when you get a follow up for a command. 
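The pattern used for this is a generation counter: the adapter-wide chip_reset value is copied into each command when it is issued, and any follow-up whose stored value no longer matches is treated as stale and aborted. A minimal user-space model of that pattern, not the driver code itself:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Per-adapter generation counter, bumped on every chip reset. */
struct adapter { uint32_t chip_reset; };

/* Each command remembers the generation it was issued under. */
struct command { uint32_t reset_count; };

static void issue_command(const struct adapter *ha, struct command *cmd)
{
	cmd->reset_count = ha->chip_reset;
}

static void chip_reset(struct adapter *ha)
{
	ha->chip_reset++;   /* the driver also issues a write barrier here */
}

/* On a follow-up (completion, data ready, TM response), drop stale work. */
static bool command_is_stale(const struct adapter *ha, const struct command *cmd)
{
	return cmd->reset_count != ha->chip_reset;
}

int main(void)
{
	struct adapter ha = { .chip_reset = 0 };
	struct command cmd;

	issue_command(&ha, &cmd);
	chip_reset(&ha);
	printf("stale after reset: %s\n", command_is_stale(&ha, &cmd) ? "yes" : "no");
	return 0;
}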
Signed-off-by: Arun Easi Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_def.h | 1 + drivers/scsi/qla2xxx/qla_gbl.h | 1 + drivers/scsi/qla2xxx/qla_init.c | 4 +++ drivers/scsi/qla2xxx/qla_iocb.c | 11 ++++++++ drivers/scsi/qla2xxx/qla_target.c | 56 ++++++++++++++++++++++++++++++++++++--- drivers/scsi/qla2xxx/qla_target.h | 2 ++ 6 files changed, 72 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index a3bab3a3efdd..e3643df3ae63 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3446,6 +3446,7 @@ struct qla_hw_data { struct work_struct board_disable; struct mr_data_fx00 mr; + uint32_t chip_reset; struct qlt_hw_data tgt; int allow_cna_fw_dump; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 42ea4477c2ec..b1865a72ce59 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -72,6 +72,7 @@ extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *, extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *); +extern void *qla2x00_alloc_iocbs_ready(struct scsi_qla_host *, srb_t *); extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *); extern fc_port_t * diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 1d66f2fae881..a4dde7e80dbd 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -4575,6 +4575,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) /* Requeue all commands in outstanding command list. */ qla2x00_abort_all_cmds(vha, DID_RESET << 16); } + + ha->chip_reset++; + /* memory barrier */ + wmb(); } /* diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index c787847b3a46..f0edb07f3198 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1858,6 +1858,17 @@ static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp) } /* Generic Control-SRB manipulation functions. */ + +/* hardware_lock assumed to be held. 
*/ +void * +qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp) +{ + if (qla2x00_reset_active(vha)) + return NULL; + + return qla2x00_alloc_iocbs(vha, sp); +} + void * qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp) { diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index de45126ade87..38f3f1f014a4 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -106,6 +106,8 @@ static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked); static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, struct qla_tgt_srr_imm *imm, int ha_lock); +static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, + struct qla_tgt_cmd *cmd); /* * Global Variables */ @@ -1036,7 +1038,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha, if (qlt_issue_marker(vha, 1) != QLA_SUCCESS) return; - resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); + resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL); if (!resp) { ql_dbg(ql_dbg_tgt, vha, 0xe04a, "qla_target(%d): %s failed: unable to allocate " @@ -1107,7 +1109,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, if (qlt_issue_marker(vha, 1) != QLA_SUCCESS) return; - ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); + ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL); if (ctio == NULL) { ql_dbg(ql_dbg_tgt, vha, 0xe04b, "qla_target(%d): %s failed: unable to allocate " @@ -1326,6 +1328,21 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) mcmd, mcmd->fc_tm_rsp, mcmd->flags); spin_lock_irqsave(&ha->hardware_lock, flags); + + if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) { + /* + * Either a chip reset is active or this request was from + * previous life, just abort the processing. + */ + ql_dbg(ql_dbg_async, vha, 0xe100, + "RESET-TMR active/old-count/new-count = %d/%d/%d.\n", + qla2x00_reset_active(vha), mcmd->reset_count, + ha->chip_reset); + ha->tgt.tgt_ops->free_mcmd(mcmd); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return; + } + if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); @@ -2269,6 +2286,21 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, spin_lock_irqsave(&ha->hardware_lock, flags); + if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) { + /* + * Either a chip reset is active or this request was from + * previous life, just abort the processing. + */ + cmd->state = QLA_TGT_STATE_PROCESSED; + qlt_abort_cmd_on_host_reset(cmd->vha, cmd); + ql_dbg(ql_dbg_async, vha, 0xe101, + "RESET-RSP active/old-count/new-count = %d/%d/%d.\n", + qla2x00_reset_active(vha), cmd->reset_count, + ha->chip_reset); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return 0; + } + /* Does F/W have an IOCBs for this request */ res = qlt_check_reserve_free_req(vha, full_req_cnt); if (unlikely(res)) @@ -2392,6 +2424,21 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) spin_lock_irqsave(&ha->hardware_lock, flags); + if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) { + /* + * Either a chip reset is active or this request was from + * previous life, just abort the processing. 
+ */ + cmd->state = QLA_TGT_STATE_NEED_DATA; + qlt_abort_cmd_on_host_reset(cmd->vha, cmd); + ql_dbg(ql_dbg_async, vha, 0xe102, + "RESET-XFR active/old-count/new-count = %d/%d/%d.\n", + qla2x00_reset_active(vha), cmd->reset_count, + ha->chip_reset); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return 0; + } + /* Does F/W have an IOCBs for this request */ res = qlt_check_reserve_free_req(vha, prm.req_cnt); if (res != 0) @@ -2577,7 +2624,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); - pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); + pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL); if (pkt == NULL) { ql_dbg(ql_dbg_tgt, vha, 0xe050, "qla_target(%d): %s failed: unable to allocate " @@ -3305,6 +3352,8 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, return -ENOMEM; } + cmd->reset_count = vha->hw->chip_reset; + INIT_WORK(&cmd->work, qlt_do_work); queue_work(qla_tgt_wq, &cmd->work); return 0; @@ -3338,6 +3387,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, } mcmd->tmr_func = fn; mcmd->flags = flags; + mcmd->reset_count = vha->hw->chip_reset; switch (fn) { case QLA_TGT_CLEAR_ACA: diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index d1d24fb0160a..20e42bdb6b0f 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -923,6 +923,7 @@ struct qla_tgt_cmd { uint32_t tag; uint32_t unpacked_lun; enum dma_data_direction dma_data_direction; + uint32_t reset_count; uint16_t loop_id; /* to save extra sess dereferences */ struct qla_tgt *tgt; /* to save extra sess dereferences */ @@ -958,6 +959,7 @@ struct qla_tgt_mgmt_cmd { struct se_cmd se_cmd; struct work_struct free_work; unsigned int flags; + uint32_t reset_count; #define QLA24XX_MGMT_SEND_NACK 1 union { struct atio_from_isp atio; -- cgit From 80187f8e7b05dff98c30909b668b5eb72482c3cb Mon Sep 17 00:00:00 2001 From: Arun Easi Date: Thu, 25 Sep 2014 06:14:53 -0400 Subject: qla2xxx: Host reset handling in ABTS path. ABTS path takes path similar to regular task management, but reset_count was not initialized when allocating mcmd causing the response to get dropped in qlt_xmit_tm_rsp. Fix this by initializing reset_count correctly. 
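The fix is the same generation stamp applied to the management command allocated in the ABTS path, so the response path no longer discards it as stale. A short hedged sketch of stamping at allocation and the stale check in the response path (illustrative types, not the driver structures):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Simplified model: a TM/ABTS command must carry the generation it was
 * allocated under, or the response path treats it as stale. */
struct adapter { uint32_t chip_reset; };
struct mgmt_cmd { uint32_t reset_count; };

static struct mgmt_cmd *alloc_mgmt_cmd(const struct adapter *ha)
{
	struct mgmt_cmd *mcmd = calloc(1, sizeof(*mcmd));

	if (mcmd)
		mcmd->reset_count = ha->chip_reset;  /* the assignment the patch adds */
	return mcmd;
}

/* Mirrors the early-out in the TM response path. */
static int xmit_tm_rsp(const struct adapter *ha, struct mgmt_cmd *mcmd)
{
	if (mcmd->reset_count != ha->chip_reset) {
		free(mcmd);   /* stale: drop instead of touching dead hardware state */
		return -1;
	}
	free(mcmd);
	return 0;
}

int main(void)
{
	struct adapter ha = { .chip_reset = 3 };
	struct mgmt_cmd *mcmd = alloc_mgmt_cmd(&ha);

	printf("tm rsp: %d\n", mcmd ? xmit_tm_rsp(&ha, mcmd) : -1);
	return 0;
}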
Signed-off-by: Arun Easi Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_target.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 38f3f1f014a4..ccd0ca25db54 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1185,6 +1185,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, mcmd->sess = sess; memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); + mcmd->reset_count = vha->hw->chip_reset; rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK, abts->exchange_addr_to_abort); @@ -3523,6 +3524,7 @@ static int __qlt_abort_task(struct scsi_qla_host *vha, lun = a->u.isp24.fcp_cmnd.lun; unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); + mcmd->reset_count = vha->hw->chip_reset; rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK, le16_to_cpu(iocb->u.isp2x.seq_id)); -- cgit From f2ea653fd448b814dd92b6554ede85abd9e22d9f Mon Sep 17 00:00:00 2001 From: Saurav Kashyap Date: Thu, 25 Sep 2014 06:14:54 -0400 Subject: qla2xxx: Increase the request queue size to 8K for ISP2031 Signed-off-by: Saurav Kashyap Signed-off-by: Giridhar Malavali Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_def.h | 1 + drivers/scsi/qla2xxx/qla_os.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index e3643df3ae63..b048841a5360 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -266,6 +266,7 @@ #define REQUEST_ENTRY_CNT_2100 128 /* Number of request entries. */ #define REQUEST_ENTRY_CNT_2200 2048 /* Number of request entries. */ #define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */ +#define REQUEST_ENTRY_CNT_83XX 8192 /* Number of request entries. */ #define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ #define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ #define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/ diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 08e5bbb12f0b..34e56e2faa0e 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2541,7 +2541,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->portnum = PCI_FUNC(ha->pdev->devfn); ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; ha->mbx_count = MAILBOX_REGISTER_COUNT; - req_length = REQUEST_ENTRY_CNT_24XX; + req_length = REQUEST_ENTRY_CNT_83XX; rsp_length = RESPONSE_ENTRY_CNT_2300; ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; -- cgit From 33e7997755936ba92516c6ad69cd012c2e7d4dbb Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Thu, 25 Sep 2014 06:14:55 -0400 Subject: qla2xxx: Add support for QFull throttling and Term Exchange retry Throughout the qla target code, the qlt_send_term_exchange() routine is used in various places to clean up an exchange. When the IOCB request queue is full, the exchange is left unhandled/dangling; the existing code has no retry logic to clean up the exchange. This patch adds retry logic to clean up the exchange before letting new commands through. When the firmware runs out of exchanges, the driver needs to reply SAM_STAT_BUSY to the initiators. This patch adds a pending queue for the busy reply in case the IOCB queue is unable to handle the command.
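Part of the scheme is a leak threshold: once the number of busy replies that could not even be queued exceeds a percentage of the firmware exchange pool, only a chip reset will reclaim the exchanges, so the driver schedules one. A hedged stand-alone sketch of that bookkeeping, mirroring the LEAK_EXCHG_THRESH_HOLD_PERCENT calculation (structure names are invented for the sketch):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define LEAK_THRESH_PERCENT 75   /* mirrors LEAK_EXCHG_THRESH_HOLD_PERCENT */

struct tgt_stats {
	uint32_t fw_xcb_count;        /* exchanges the firmware can hold */
	uint32_t num_qfull_dropped;   /* busy replies that could not be queued */
};

/* Threshold is a percentage of the firmware exchange pool. */
static uint32_t leak_threshold(const struct tgt_stats *st)
{
	return (st->fw_xcb_count / 100) * LEAK_THRESH_PERCENT;
}

/* Returns true when enough exchanges have leaked that only a chip
 * reset will reclaim them (the driver then sets ISP_ABORT_NEEDED). */
static bool need_chip_reset(const struct tgt_stats *st)
{
	uint32_t thresh = leak_threshold(st);

	return thresh && st->num_qfull_dropped > thresh;
}

int main(void)
{
	struct tgt_stats st = { .fw_xcb_count = 2048, .num_qfull_dropped = 1600 };

	printf("threshold=%u, reset needed: %s\n",
	       leak_threshold(&st), need_chip_reset(&st) ? "yes" : "no");
	return 0;
}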
Cc: Signed-off-by: Quinn Tran Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_dbg.c | 4 +- drivers/scsi/qla2xxx/qla_def.h | 17 +++ drivers/scsi/qla2xxx/qla_os.c | 2 + drivers/scsi/qla2xxx/qla_target.c | 297 +++++++++++++++++++++++++++++++++++++- drivers/scsi/qla2xxx/qla_target.h | 4 + 5 files changed, 316 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 83d47ab3fd1e..d77fe43793b6 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -19,7 +19,7 @@ * | Device Discovery | 0x2016 | 0x2020-0x2022, | * | | | 0x2011-0x2012, | * | | | 0x2099-0x20a4 | - * | Queue Command and IO tracing | 0x3059 | 0x3006-0x300b | + * | Queue Command and IO tracing | 0x3059 | 0x300b | * | | | 0x3027-0x3028 | * | | | 0x303d-0x3041 | * | | | 0x302d,0x3033 | @@ -67,7 +67,7 @@ * | | | 0xd031-0xd0ff | * | | | 0xd101-0xd1fe | * | | | 0xd214-0xd2fe | - * | Target Mode | 0xe078 | | + * | Target Mode | 0xe079 | | * | Target Mode Management | 0xf072 | 0xf002 | * | | | 0xf046-0xf049 | * | Target Mode Task Management | 0x1000b | | diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index b048841a5360..5f6b2960cccb 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2776,6 +2776,9 @@ struct qla_statistics { uint32_t control_requests; uint64_t jiffies_at_last_reset; + uint32_t stat_max_pend_cmds; + uint32_t stat_max_qfull_cmds_alloc; + uint32_t stat_max_qfull_cmds_dropped; }; struct bidi_statistics { @@ -2898,8 +2901,22 @@ struct qlt_hw_data { uint8_t saved_add_firmware_options[2]; uint8_t tgt_node_name[WWN_SIZE]; + + struct list_head q_full_list; + uint32_t num_pend_cmds; + uint32_t num_qfull_cmds_alloc; + uint32_t num_qfull_cmds_dropped; + spinlock_t q_full_lock; + uint32_t leak_exchg_thresh_hold; }; +#define MAX_QFULL_CMDS_ALLOC 8192 +#define Q_FULL_THRESH_HOLD_PERCENT 90 +#define Q_FULL_THRESH_HOLD(ha) \ + ((ha->fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT) + +#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */ + /* * Qlogic host adapter specific data structure. 
*/ diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 34e56e2faa0e..dabd25429c58 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2398,6 +2398,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) "Memory allocated for ha=%p.\n", ha); ha->pdev = pdev; ha->tgt.enable_class_2 = ql2xenableclass2; + INIT_LIST_HEAD(&ha->tgt.q_full_list); + spin_lock_init(&ha->tgt.q_full_lock); /* Clear our data area */ ha->bars = bars; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index ccd0ca25db54..9f248e7505ff 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -59,6 +59,8 @@ MODULE_PARM_DESC(qlini_mode, int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; +static int temp_sam_status = SAM_STAT_BUSY; + /* * From scsi/fc/fc_fcp.h */ @@ -108,6 +110,8 @@ static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, struct qla_tgt_srr_imm *imm, int ha_lock); static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd); +static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, + struct atio_from_isp *atio, uint16_t status, int qfull); /* * Global Variables */ @@ -185,6 +189,27 @@ struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha, return NULL; } +static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha) +{ + unsigned long flags; + + spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); + + vha->hw->tgt.num_pend_cmds++; + if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds) + vha->hw->qla_stats.stat_max_pend_cmds = + vha->hw->tgt.num_pend_cmds; + spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); +} +static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha) +{ + unsigned long flags; + + spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); + vha->hw->tgt.num_pend_cmds--; + spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); +} + void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, struct atio_from_isp *atio) { @@ -2683,10 +2708,14 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha, if (ha_locked) { rc = __qlt_send_term_exchange(vha, cmd, atio); + if (rc == -ENOMEM) + qlt_alloc_qfull_cmd(vha, atio, 0, 0); goto done; } spin_lock_irqsave(&vha->hw->hardware_lock, flags); rc = __qlt_send_term_exchange(vha, cmd, atio); + if (rc == -ENOMEM) + qlt_alloc_qfull_cmd(vha, atio, 0, 0); spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); done: /* @@ -2711,6 +2740,53 @@ done: return; } +static void qlt_init_term_exchange(struct scsi_qla_host *vha) +{ + struct list_head free_list; + struct qla_tgt_cmd *cmd, *tcmd; + + vha->hw->tgt.leak_exchg_thresh_hold = + (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT; + + cmd = tcmd = NULL; + if (!list_empty(&vha->hw->tgt.q_full_list)) { + INIT_LIST_HEAD(&free_list); + list_splice_init(&vha->hw->tgt.q_full_list, &free_list); + + list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { + list_del(&cmd->cmd_list); + /* This cmd was never sent to TCM. 
There is no need + * to schedule free or call free_cmd + */ + qlt_free_cmd(cmd); + vha->hw->tgt.num_qfull_cmds_alloc--; + } + } + vha->hw->tgt.num_qfull_cmds_dropped = 0; +} + +static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) +{ + uint32_t total_leaked; + + total_leaked = vha->hw->tgt.num_qfull_cmds_dropped; + + if (vha->hw->tgt.leak_exchg_thresh_hold && + (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) { + + ql_dbg(ql_dbg_tgt, vha, 0xe079, + "Chip reset due to exchange starvation: %d/%d.\n", + total_leaked, vha->hw->fw_xcb_count); + + if (IS_P3P_TYPE(vha->hw)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + +} + void qlt_free_cmd(struct qla_tgt_cmd *cmd) { struct qla_tgt_sess *sess = cmd->sess; @@ -2720,6 +2796,9 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd) __func__, &cmd->se_cmd, be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); + if (!cmd->q_full) + qlt_decr_num_pend_cmds(cmd->vha); + BUG_ON(cmd->sg_mapped); if (unlikely(cmd->free_sg)) kfree(cmd->sg); @@ -3078,7 +3157,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, * level. */ if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && - (cmd->state != QLA_TGT_STATE_ABORTED)) { + (cmd->state != QLA_TGT_STATE_ABORTED)) { if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) return; } @@ -3114,6 +3193,7 @@ skip_term: dump_stack(); } + ha->tgt.tgt_ops->free_cmd(cmd); } @@ -3211,6 +3291,8 @@ out_term: */ spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); + + qlt_decr_num_pend_cmds(vha); percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -3241,6 +3323,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, memcpy(&cmd->atio, atio, sizeof(*atio)); cmd->state = QLA_TGT_STATE_NEW; cmd->tgt = vha->vha_tgt.qla_tgt; + qlt_incr_num_pend_cmds(vha); cmd->vha = vha; cmd->se_cmd.map_tag = tag; cmd->sess = sess; @@ -4184,7 +4267,7 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha, * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire * This function sends busy to ISP 2xxx or 24xx. */ -static void qlt_send_busy(struct scsi_qla_host *vha, +static int __qlt_send_busy(struct scsi_qla_host *vha, struct atio_from_isp *atio, uint16_t status) { struct ctio7_to_24xx *ctio24; @@ -4196,7 +4279,7 @@ static void qlt_send_busy(struct scsi_qla_host *vha, atio->u.isp24.fcp_hdr.s_id); if (!sess) { qlt_send_term_exchange(vha, NULL, atio, 1); - return; + return 0; } /* Sending marker isn't necessary, since we called from ISR */ @@ -4205,7 +4288,7 @@ static void qlt_send_busy(struct scsi_qla_host *vha, ql_dbg(ql_dbg_io, vha, 0x3063, "qla_target(%d): %s failed: unable to allocate " "request packet", vha->vp_idx, __func__); - return; + return -ENOMEM; } pkt->entry_count = 1; @@ -4237,6 +4320,189 @@ static void qlt_send_busy(struct scsi_qla_host *vha, ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; qla2x00_start_iocbs(vha, vha->req); + return 0; +} + +/* + * This routine is used to allocate a command for either a QFull condition + * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go + * out previously. 
+ */ +static void +qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, + struct atio_from_isp *atio, uint16_t status, int qfull) +{ + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_sess *sess; + struct se_session *se_sess; + struct qla_tgt_cmd *cmd; + int tag; + + if (unlikely(tgt->tgt_stop)) { + ql_dbg(ql_dbg_io, vha, 0x300a, + "New command while device %p is shutting down\n", tgt); + return; + } + + if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) { + vha->hw->tgt.num_qfull_cmds_dropped++; + if (vha->hw->tgt.num_qfull_cmds_dropped > + vha->hw->qla_stats.stat_max_qfull_cmds_dropped) + vha->hw->qla_stats.stat_max_qfull_cmds_dropped = + vha->hw->tgt.num_qfull_cmds_dropped; + + ql_dbg(ql_dbg_io, vha, 0x3068, + "qla_target(%d): %s: QFull CMD dropped[%d]\n", + vha->vp_idx, __func__, + vha->hw->tgt.num_qfull_cmds_dropped); + + qlt_chk_exch_leak_thresh_hold(vha); + return; + } + + sess = ha->tgt.tgt_ops->find_sess_by_s_id + (vha, atio->u.isp24.fcp_hdr.s_id); + if (!sess) + return; + + se_sess = sess->se_sess; + + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); + if (tag < 0) + return; + + cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; + if (!cmd) { + ql_dbg(ql_dbg_io, vha, 0x3009, + "qla_target(%d): %s: Allocation of cmd failed\n", + vha->vp_idx, __func__); + + vha->hw->tgt.num_qfull_cmds_dropped++; + if (vha->hw->tgt.num_qfull_cmds_dropped > + vha->hw->qla_stats.stat_max_qfull_cmds_dropped) + vha->hw->qla_stats.stat_max_qfull_cmds_dropped = + vha->hw->tgt.num_qfull_cmds_dropped; + + qlt_chk_exch_leak_thresh_hold(vha); + return; + } + + memset(cmd, 0, sizeof(struct qla_tgt_cmd)); + + qlt_incr_num_pend_cmds(vha); + INIT_LIST_HEAD(&cmd->cmd_list); + memcpy(&cmd->atio, atio, sizeof(*atio)); + + cmd->tgt = vha->vha_tgt.qla_tgt; + cmd->vha = vha; + cmd->reset_count = vha->hw->chip_reset; + cmd->q_full = 1; + + if (qfull) { + cmd->q_full = 1; + /* NOTE: borrowing the state field to carry the status */ + cmd->state = status; + } else + cmd->term_exchg = 1; + + list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list); + + vha->hw->tgt.num_qfull_cmds_alloc++; + if (vha->hw->tgt.num_qfull_cmds_alloc > + vha->hw->qla_stats.stat_max_qfull_cmds_alloc) + vha->hw->qla_stats.stat_max_qfull_cmds_alloc = + vha->hw->tgt.num_qfull_cmds_alloc; +} + +int +qlt_free_qfull_cmds(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + struct qla_tgt_cmd *cmd, *tcmd; + struct list_head free_list; + int rc = 0; + + if (list_empty(&ha->tgt.q_full_list)) + return 0; + + INIT_LIST_HEAD(&free_list); + + spin_lock_irqsave(&vha->hw->hardware_lock, flags); + + if (list_empty(&ha->tgt.q_full_list)) { + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); + return 0; + } + + list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) { + if (cmd->q_full) + /* cmd->state is a borrowed field to hold status */ + rc = __qlt_send_busy(vha, &cmd->atio, cmd->state); + else if (cmd->term_exchg) + rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio); + + if (rc == -ENOMEM) + break; + + if (cmd->q_full) + ql_dbg(ql_dbg_io, vha, 0x3006, + "%s: busy sent for ox_id[%04x]\n", __func__, + be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); + else if (cmd->term_exchg) + ql_dbg(ql_dbg_io, vha, 0x3007, + "%s: Term exchg sent for ox_id[%04x]\n", __func__, + be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); + else + ql_dbg(ql_dbg_io, vha, 0x3008, + "%s: Unexpected cmd in QFull list %p\n", __func__, + cmd); + + 
list_del(&cmd->cmd_list); + list_add_tail(&cmd->cmd_list, &free_list); + + /* piggy back on hardware_lock for protection */ + vha->hw->tgt.num_qfull_cmds_alloc--; + } + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); + + cmd = NULL; + + list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { + list_del(&cmd->cmd_list); + /* This cmd was never sent to TCM. There is no need + * to schedule free or call free_cmd + */ + qlt_free_cmd(cmd); + } + return rc; +} + +static void +qlt_send_busy(struct scsi_qla_host *vha, + struct atio_from_isp *atio, uint16_t status) +{ + int rc = 0; + + rc = __qlt_send_busy(vha, atio, status); + if (rc == -ENOMEM) + qlt_alloc_qfull_cmd(vha, atio, status, 1); +} + +static int +qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, + struct atio_from_isp *atio) +{ + struct qla_hw_data *ha = vha->hw; + uint16_t status; + + if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) + return 0; + + status = temp_sam_status; + qlt_send_busy(vha, atio, status); + return 1; } /* ha->hardware_lock supposed to be held on entry */ @@ -4271,10 +4537,19 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL); break; } - if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) + + + + if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { + rc = qlt_chk_qfull_thresh_hold(vha, atio); + if (rc != 0) { + tgt->irq_cmd_count--; + return; + } rc = qlt_handle_cmd_for_atio(vha, atio); - else + } else { rc = qlt_handle_task_mgmt(vha, atio); + } if (unlikely(rc != 0)) { if (rc == -ESRCH) { #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ @@ -4369,6 +4644,12 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) break; } + rc = qlt_chk_qfull_thresh_hold(vha, atio); + if (rc != 0) { + tgt->irq_cmd_count--; + return; + } + rc = qlt_handle_cmd_for_atio(vha, atio); if (unlikely(rc != 0)) { if (rc == -ESRCH) { @@ -4906,6 +5187,10 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) qlt_release(vha->vha_tgt.qla_tgt); return 0; } + + /* free left over qfull cmds */ + qlt_init_term_exchange(vha); + mutex_lock(&qla_tgt_mutex); list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry); mutex_unlock(&qla_tgt_mutex); diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 20e42bdb6b0f..0c768f5e885a 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -915,6 +915,8 @@ struct qla_tgt_cmd { unsigned int aborted:1; /* Needed in case of SRR */ unsigned int write_data_transferred:1; unsigned int ctx_dsd_alloced:1; + unsigned int q_full:1; + unsigned int term_exchg:1; struct scatterlist *sg; /* cmd data buffer SG vector */ int sg_cnt; /* SG segments count */ @@ -928,6 +930,7 @@ struct qla_tgt_cmd { uint16_t loop_id; /* to save extra sess dereferences */ struct qla_tgt *tgt; /* to save extra sess dereferences */ struct scsi_qla_host *vha; + struct list_head cmd_list; struct atio_from_isp atio; /* t10dif */ @@ -1091,5 +1094,6 @@ extern int qlt_stop_phase1(struct qla_tgt *); extern void qlt_stop_phase2(struct qla_tgt *); extern irqreturn_t qla83xx_msix_atio_q(int, void *); extern void qlt_83xx_iospace_config(struct qla_hw_data *); +extern int qlt_free_qfull_cmds(struct scsi_qla_host *); #endif /* __QLA_TARGET_H */ -- cgit From da6b0ace34378e928a6cba6010d7856664c46746 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Thu, 25 Sep 2014 06:14:56 -0400 Subject: qla2xxx: Do not send SS_RESIDUAL_UNDER with SAM_STAT_BUSY Signed-off-by: Himanshu 
Madhani Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_target.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 9f248e7505ff..94b726ed21e1 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -4313,12 +4313,6 @@ static int __qlt_send_busy(struct scsi_qla_host *vha, */ ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); ctio24->u.status1.scsi_status = cpu_to_le16(status); - ctio24->u.status1.residual = get_unaligned((uint32_t *) - &atio->u.isp24.fcp_cmnd.add_cdb[ - atio->u.isp24.fcp_cmnd.add_cdb_len]); - if (ctio24->u.status1.residual != 0) - ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; - qla2x00_start_iocbs(vha, vha->req); return 0; } -- cgit From d564a372b0047de8014614fa66f2d071815605ed Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Thu, 25 Sep 2014 06:14:57 -0400 Subject: qla2xxx: Fix hang due to cmd_kref not decrementing Signed-off-by: Quinn Tran Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_target.c | 18 ++++++------------ drivers/scsi/qla2xxx/qla_target.h | 1 + 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 94b726ed21e1..2ba44334fb84 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -2409,6 +2409,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ + cmd->cmd_sent_to_fw = 1; qla2x00_start_iocbs(vha, vha->req); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -2484,6 +2485,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) qlt_load_data_segments(&prm, vha); cmd->state = QLA_TGT_STATE_NEED_DATA; + cmd->cmd_sent_to_fw = 1; qla2x00_start_iocbs(vha, vha->req); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -2717,19 +2719,10 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha, if (rc == -ENOMEM) qlt_alloc_qfull_cmd(vha, atio, 0, 0); spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); + done: - /* - * Terminate exchange will tell fw to release any active CTIO - * that's in FW posession and cleanup the exchange. - * - * "cmd->state == QLA_TGT_STATE_ABORTED" means CTIO is still - * down at FW. Free the cmd later when CTIO comes back later - * w/aborted(0x2) status. - * - * "cmd->state != QLA_TGT_STATE_ABORTED" means CTIO is already - * back w/some err. Free the cmd now. 
- */ - if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) { + if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) || + !cmd->cmd_sent_to_fw)) { if (!ha_locked && !in_interrupt()) msleep(250); /* just in case */ @@ -3071,6 +3064,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, se_cmd = &cmd->se_cmd; tfo = se_cmd->se_tfo; + cmd->cmd_sent_to_fw = 0; if (cmd->sg_mapped) qlt_unmap_sg(vha, cmd); diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 0c768f5e885a..b07b230b14cc 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -917,6 +917,7 @@ struct qla_tgt_cmd { unsigned int ctx_dsd_alloced:1; unsigned int q_full:1; unsigned int term_exchg:1; + unsigned int cmd_sent_to_fw:1; struct scatterlist *sg; /* cmd data buffer SG vector */ int sg_cnt; /* SG segments count */ -- cgit From e07f8f6547c246936b489772717b05695af53e35 Mon Sep 17 00:00:00 2001 From: Saurav Kashyap Date: Thu, 25 Sep 2014 06:14:58 -0400 Subject: qla2xxx: Add flags for tracing the target commands. Signed-off-by: Saurav Kashyap Signed-off-by: Giridhar Malavali Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_target.c | 30 ++++++++++++++++++++++++++---- drivers/scsi/qla2xxx/qla_target.h | 24 ++++++++++++++++++++++++ drivers/scsi/qla2xxx/tcm_qla2xxx.c | 24 ++++++++++++++++++++++++ 3 files changed, 74 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 2ba44334fb84..f3287e1bc554 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1723,6 +1723,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, se_cmd, cmd->tag); cmd->state = QLA_TGT_STATE_ABORTED; + cmd->cmd_flags |= BIT_6; qlt_send_term_exchange(vha, cmd, &cmd->atio, 0); @@ -2789,10 +2790,13 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd) __func__, &cmd->se_cmd, be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); + BUG_ON(cmd->cmd_in_wq); + if (!cmd->q_full) qlt_decr_num_pend_cmds(cmd->vha); BUG_ON(cmd->sg_mapped); + cmd->jiffies_at_free = get_jiffies_64(); if (unlikely(cmd->free_sg)) kfree(cmd->sg); @@ -2800,6 +2804,7 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd) WARN_ON(1); return; } + cmd->jiffies_at_free = get_jiffies_64(); percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); } EXPORT_SYMBOL(qlt_free_cmd); @@ -2813,6 +2818,7 @@ static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha, struct qla_tgt_srr_imm *imm; tgt->ctio_srr_id++; + cmd->cmd_flags |= BIT_15; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx); @@ -2998,6 +3004,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) dump_stack(); } + cmd->cmd_flags |= BIT_12; ha->tgt.tgt_ops->free_cmd(cmd); } @@ -3152,6 +3159,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, */ if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && (cmd->state != QLA_TGT_STATE_ABORTED)) { + cmd->cmd_flags |= BIT_13; if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) return; } @@ -3240,6 +3248,8 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) uint32_t data_length; int ret, fcp_task_attr, data_dir, bidi = 0; + cmd->cmd_in_wq = 0; + cmd->cmd_flags |= BIT_1; if (tgt->tgt_stop) goto out_term; @@ -3283,6 +3293,7 @@ out_term: * cmd has not sent to target yet, so pass NULL as the second * argument to qlt_send_term_exchange() and free the memory here. 
*/ + cmd->cmd_flags |= BIT_2; spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); @@ -3430,8 +3441,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, return -ENOMEM; } + cmd->cmd_flags = 0; + cmd->jiffies_at_alloc = get_jiffies_64(); + cmd->reset_count = vha->hw->chip_reset; + cmd->cmd_in_wq = 1; + cmd->cmd_flags |= BIT_0; INIT_WORK(&cmd->work, qlt_do_work); queue_work(qla_tgt_wq, &cmd->work); return 0; @@ -3893,8 +3909,10 @@ static void qlt_handle_srr(struct scsi_qla_host *vha, qlt_send_notify_ack(vha, ntfy, 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (xmit_type & QLA_TGT_XMIT_DATA) + if (xmit_type & QLA_TGT_XMIT_DATA) { + cmd->cmd_flags |= BIT_8; qlt_rdy_to_xfer(cmd); + } } else { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, "qla_target(%d): SRR for out data for cmd " @@ -3912,8 +3930,10 @@ static void qlt_handle_srr(struct scsi_qla_host *vha, } /* Transmit response in case of status and data-in cases */ - if (resp) + if (resp) { + cmd->cmd_flags |= BIT_7; qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); + } return; @@ -3926,8 +3946,10 @@ out_reject: if (cmd->state == QLA_TGT_STATE_NEED_DATA) { cmd->state = QLA_TGT_STATE_DATA_IN; dump_stack(); - } else + } else { + cmd->cmd_flags |= BIT_9; qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); + } spin_unlock_irqrestore(&ha->hardware_lock, flags); } @@ -4041,7 +4063,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, tgt->imm_srr_id++; - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n", + ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n", vha->vp_idx); imm = kzalloc(sizeof(*imm), GFP_ATOMIC); diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index b07b230b14cc..8ff330f7d6f5 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -918,6 +918,7 @@ struct qla_tgt_cmd { unsigned int q_full:1; unsigned int term_exchg:1; unsigned int cmd_sent_to_fw:1; + unsigned int cmd_in_wq:1; struct scatterlist *sg; /* cmd data buffer SG vector */ int sg_cnt; /* SG segments count */ @@ -940,6 +941,29 @@ struct qla_tgt_cmd { uint32_t blk_sz; struct crc_context *ctx; + uint64_t jiffies_at_alloc; + uint64_t jiffies_at_free; + /* BIT_0 - Atio Arrival / schedule to work + * BIT_1 - qlt_do_work + * BIT_2 - qlt_do work failed + * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending + * BIT_4 - read respond/tcm_qla2xx_queue_data_in + * BIT_5 - status respond / tcm_qla2xx_queue_status + * BIT_6 - tcm request to abort/Term exchange. + * pre_xmit_response->qlt_send_term_exchange + * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response) + * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer) + * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange) + * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data + * BIT_11 - Data actually going to TCM : tcm_qla2xx_handle_data_work + * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd + * BIT_13 - Bad completion - + * qlt_ctio_do_completion --> qlt_term_ctio_exchange + * BIT_14 - Back end data received/sent. 
+ * BIT_15 - SRR prepare ctio + * BIT_16 - complete free + */ + uint32_t cmd_flags; }; struct qla_tgt_sess_work_param { diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 9f954074acd9..031b2961c6b7 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -390,6 +390,11 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work) { struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + cmd->cmd_in_wq = 0; + + WARN_ON(cmd->cmd_flags & BIT_16); + + cmd->cmd_flags |= BIT_16; transport_generic_free_cmd(&cmd->se_cmd, 0); } @@ -400,6 +405,7 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work) */ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) { + cmd->cmd_in_wq = 1; INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); queue_work(tcm_qla2xxx_free_wq, &cmd->work); } @@ -409,6 +415,13 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) */ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd) { + struct qla_tgt_cmd *cmd; + + if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) { + cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); + cmd->cmd_flags |= BIT_14; + } + return target_put_sess_cmd(se_cmd->se_sess, se_cmd); } @@ -571,6 +584,8 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) * Ensure that the complete FCP WRITE payload has been received. * Otherwise return an exception via CHECK_CONDITION status. */ + cmd->cmd_in_wq = 0; + cmd->cmd_flags |= BIT_11; if (!cmd->write_data_transferred) { /* * Check if se_cmd has already been aborted via LUN_RESET, and @@ -599,6 +614,8 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) */ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) { + cmd->cmd_flags |= BIT_10; + cmd->cmd_in_wq = 1; INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); queue_work(tcm_qla2xxx_free_wq, &cmd->work); } @@ -642,6 +659,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); + cmd->cmd_flags |= BIT_4; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); @@ -649,6 +667,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) cmd->sg_cnt = se_cmd->t_data_nents; cmd->sg = se_cmd->t_data_sg; cmd->offset = 0; + cmd->cmd_flags |= BIT_3; cmd->prot_sg_cnt = se_cmd->t_prot_nents; cmd->prot_sg = se_cmd->t_prot_sg; @@ -674,6 +693,11 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) cmd->offset = 0; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); + if (cmd->cmd_flags & BIT_5) { + pr_crit("Bit_5 already set for cmd = %p.\n", cmd); + dump_stack(); + } + cmd->cmd_flags |= BIT_5; if (se_cmd->data_direction == DMA_FROM_DEVICE) { /* -- cgit From 63163e060127876f57c327c85f2890ed427a4907 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Thu, 25 Sep 2014 06:14:59 -0400 Subject: qla2xxx: Add memory barrier before ringing doorbell. 
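A write barrier between building an IOCB in coherent DMA memory and the register write that publishes it keeps the adapter from consuming a half-written entry; that is the general shape of this change, which places a wmb() ahead of each qla2x00_start_iocbs() call. A minimal sketch of the pattern, with made-up names:

#include <linux/io.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct req_entry {              /* stand-in for a 64-byte request IOCB */
        u32 word[16];
};

static void post_iocb(struct req_entry *slot, const struct req_entry *iocb,
                      void __iomem *in_ptr_reg, u32 new_in_ptr)
{
        *slot = *iocb;          /* build the entry in coherent DMA memory */
        wmb();                  /* entry stores must be visible before the doorbell */
        writel(new_in_ptr, in_ptr_reg); /* advance the in-pointer: HBA may now fetch */
}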
Signed-off-by: Himanshu Madhani Signed-off-by: Saurav Kashyap Signed-off-by: Christoph Hellwig --- drivers/scsi/qla2xxx/qla_target.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index f3287e1bc554..829752cfd73f 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1040,6 +1040,8 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, "qla_target(%d): Sending 24xx Notify Ack %d\n", vha->vp_idx, nack->u.isp24.status); + /* Memory Barrier */ + wmb(); qla2x00_start_iocbs(vha, vha->req); } @@ -1117,6 +1119,8 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha, vha->vha_tgt.qla_tgt->abts_resp_expected++; + /* Memory Barrier */ + wmb(); qla2x00_start_iocbs(vha, vha->req); } @@ -1162,6 +1166,8 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, CTIO7_FLAGS_TERMINATE); ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id); + /* Memory Barrier */ + wmb(); qla2x00_start_iocbs(vha, vha->req); qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry, @@ -1333,6 +1339,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, ctio->u.status1.response_len = __constant_cpu_to_le16(8); ctio->u.status1.sense_data[0] = resp_code; + /* Memory Barrier */ + wmb(); qla2x00_start_iocbs(ha, ha->req); } @@ -2412,6 +2420,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ cmd->cmd_sent_to_fw = 1; + /* Memory Barrier */ + wmb(); qla2x00_start_iocbs(vha, vha->req); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -2488,6 +2498,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) cmd->state = QLA_TGT_STATE_NEED_DATA; cmd->cmd_sent_to_fw = 1; + /* Memory Barrier */ + wmb(); qla2x00_start_iocbs(vha, vha->req); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -2696,6 +2708,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, if (ctio24->u.status1.residual != 0) ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; + /* Memory Barrier */ + wmb(); qla2x00_start_iocbs(vha, vha->req); return ret; } @@ -4329,6 +4343,8 @@ static int __qlt_send_busy(struct scsi_qla_host *vha, */ ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); ctio24->u.status1.scsi_status = cpu_to_le16(status); + /* Memory Barrier */ + wmb(); qla2x00_start_iocbs(vha, vha->req); return 0; } -- cgit From d247a70a4d67623eb738176ee703a7bb8b6c325c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 26 Sep 2014 09:49:02 +0200 Subject: ipr: fix compile failure Fix a typo in the IPR_IOASC_HW_CMD_FAILED declaration. Based on a patch from Wen Xiong . 
Signed-off-by: Christoph Hellwig --- drivers/scsi/ipr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 8f8960d0bd73..d0201ceb4aac 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -130,7 +130,7 @@ #define IPR_IOASC_HW_DEV_BUS_STATUS 0x04448500 #define IPR_IOASC_IOASC_MASK 0xFFFFFF00 #define IPR_IOASC_SCSI_STATUS_MASK 0x000000FF -#define IPR_IOASC_HW_CMD_FAILEd 0x046E0000 +#define IPR_IOASC_HW_CMD_FAILED 0x046E0000 #define IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT 0x05240000 #define IPR_IOASC_IR_RESOURCE_HANDLE 0x05250000 #define IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA 0x05258100 -- cgit From b8d23dc6133c495cf9161dfb36988bbbe163461d Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Thu, 25 Sep 2014 11:55:42 -0700 Subject: fcoe: extend ethtool to FC port speed mapping add support for 20 Gbit and 40 Gbit links Signed-off-by: Chris Leech Signed-off-by: Vasu Dev Signed-off-by: Christoph Hellwig --- drivers/scsi/fcoe/fcoe_transport.c | 39 ++++++++++++++++++++++++++++++++------ 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index 74277c20f6a5..bdc89899561a 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -96,14 +96,32 @@ int fcoe_link_speed_update(struct fc_lport *lport) struct ethtool_cmd ecmd; if (!__ethtool_get_settings(netdev, &ecmd)) { - lport->link_supported_speeds &= - ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); + lport->link_supported_speeds &= ~(FC_PORTSPEED_1GBIT | + FC_PORTSPEED_10GBIT | + FC_PORTSPEED_20GBIT | + FC_PORTSPEED_40GBIT); + if (ecmd.supported & (SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full)) + SUPPORTED_1000baseT_Full | + SUPPORTED_1000baseKX_Full)) lport->link_supported_speeds |= FC_PORTSPEED_1GBIT; - if (ecmd.supported & SUPPORTED_10000baseT_Full) - lport->link_supported_speeds |= - FC_PORTSPEED_10GBIT; + + if (ecmd.supported & (SUPPORTED_10000baseT_Full | + SUPPORTED_10000baseKX4_Full | + SUPPORTED_10000baseKR_Full | + SUPPORTED_10000baseR_FEC)) + lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; + + if (ecmd.supported & (SUPPORTED_20000baseMLD2_Full | + SUPPORTED_20000baseKR2_Full)) + lport->link_supported_speeds |= FC_PORTSPEED_20GBIT; + + if (ecmd.supported & (SUPPORTED_40000baseKR4_Full | + SUPPORTED_40000baseCR4_Full | + SUPPORTED_40000baseSR4_Full | + SUPPORTED_40000baseLR4_Full)) + lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; + switch (ethtool_cmd_speed(&ecmd)) { case SPEED_1000: lport->link_speed = FC_PORTSPEED_1GBIT; @@ -111,6 +129,15 @@ int fcoe_link_speed_update(struct fc_lport *lport) case SPEED_10000: lport->link_speed = FC_PORTSPEED_10GBIT; break; + case 20000: + lport->link_speed = FC_PORTSPEED_20GBIT; + break; + case 40000: + lport->link_speed = FC_PORTSPEED_40GBIT; + break; + default: + lport->link_speed = FC_PORTSPEED_UNKNOWN; + break; } return 0; } -- cgit From f4303d8fa6da702b5fe53fa91a6984941d89d514 Mon Sep 17 00:00:00 2001 From: Andreea-Cristina Bernat Date: Mon, 18 Aug 2014 17:56:22 +0300 Subject: libfc: Replace rcu_assign_pointer() with RCU_INIT_POINTER() The uses of "rcu_assign_pointer()" are NULLing out the pointers. According to RCU_INIT_POINTER()'s block comment: "1. This use of RCU_INIT_POINTER() is NULLing out the pointer" it is better to use it instead of rcu_assign_pointer() because it has a smaller overhead. 
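Concretely, rcu_assign_pointer() includes a release barrier so that a reader who sees the new pointer also sees the pointed-to object fully initialized; when the stored value is NULL there is nothing behind the pointer to order against, so the plain RCU_INIT_POINTER() assignment suffices. A small illustration with a hypothetical RCU-protected pointer:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int a;
};

static struct foo __rcu *gp;    /* hypothetical RCU-protected pointer */

static void publish(struct foo *p)
{
        p->a = 42;
        rcu_assign_pointer(gp, p);      /* orders p->a before the pointer store */
}

static void unpublish(void)
{
        struct foo *old = rcu_dereference_protected(gp, 1);

        RCU_INIT_POINTER(gp, NULL);     /* NULL payload: no ordering needed */
        synchronize_rcu();              /* wait out readers before freeing */
        kfree(old);
}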
The following Coccinelle semantic patch was used: @@ @@ - rcu_assign_pointer + RCU_INIT_POINTER (..., NULL) Signed-off-by: Andreea-Cristina Bernat Acked-by: Vasu Dev Signed-off-by: Christoph Hellwig --- drivers/scsi/libfc/fc_libfc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c index 8d65a51a7598..c11a638f32e6 100644 --- a/drivers/scsi/libfc/fc_libfc.c +++ b/drivers/scsi/libfc/fc_libfc.c @@ -296,9 +296,9 @@ void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov) BUG_ON(type >= FC_FC4_PROV_SIZE); mutex_lock(&fc_prov_mutex); if (prov->recv) - rcu_assign_pointer(fc_passive_prov[type], NULL); + RCU_INIT_POINTER(fc_passive_prov[type], NULL); else - rcu_assign_pointer(fc_active_prov[type], NULL); + RCU_INIT_POINTER(fc_active_prov[type], NULL); mutex_unlock(&fc_prov_mutex); synchronize_rcu(); } -- cgit From 53281edb2942c4d7abf12846b6b0591caf2bee4d Mon Sep 17 00:00:00 2001 From: John Soni Jose Date: Fri, 26 Sep 2014 15:13:55 -0400 Subject: be2iscsi : Fix kernel panic during reboot/shutdown In the reboot/shutdown path, workqueue was destroyed after the adapter resource were freed. The task associated with workqueue was getting executed after resources were freed. This lead to kernel panic. Signed-off-by: John Soni Jose Signed-off-by: Jayamohan Kallickal Signed-off-by: Christoph Hellwig --- drivers/scsi/be2iscsi/be_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index db2bd4d5311a..30d74a06b993 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -5223,6 +5223,7 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba, free_irq(phba->pcidev->irq, phba); } pci_disable_msix(phba->pcidev); + cancel_delayed_work_sync(&phba->beiscsi_hw_check_task); for (i = 0; i < phba->num_cpus; i++) { pbe_eq = &phwi_context->be_eq[i]; @@ -5244,7 +5245,6 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba, hwi_cleanup(phba); } - cancel_delayed_work_sync(&phba->beiscsi_hw_check_task); } static void beiscsi_remove(struct pci_dev *pcidev) -- cgit From 49b552dedf63a5b4cde1b5e891ec305f16b09567 Mon Sep 17 00:00:00 2001 From: John Soni Jose Date: Fri, 26 Sep 2014 15:14:16 -0400 Subject: be2iscsi : Bump the driver version Bump the driver version Signed-off-by: John Soni Jose Signed-off-by: Jayamohan Kallickal Signed-off-by: Christoph Hellwig --- drivers/scsi/be2iscsi/be_main.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 36056311a6e4..7ee0ffc38514 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -36,7 +36,7 @@ #include #define DRV_NAME "be2iscsi" -#define BUILD_STR "10.4.74.0" +#define BUILD_STR "10.4.114.0" #define BE_NAME "Emulex OneConnect" \ "Open-iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" -- cgit From 4bfb8ebf4c21f372a8677f9aa99963985e9e6539 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Mon, 29 Sep 2014 13:55:42 -0500 Subject: iscsi_tcp: export port being used This just has iscsi_tcp support ISCSI_PARAM_LOCAL_PORT which exports the local port being used by the iscsi connection. 
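In other words, ISCSI_PARAM_LOCAL_PORT reports the socket's own bound endpoint rather than the target's: for this parameter the connection code asks kernel_getsockname() where the existing address/port parameters use kernel_getpeername(). A stripped-down illustration of the distinction, using the same three-argument form as the driver (error handling omitted, names hypothetical):

#include <linux/net.h>
#include <linux/socket.h>

static void show_endpoints(struct socket *sock)
{
        struct sockaddr_storage local, peer;
        int len;

        len = sizeof(local);
        kernel_getsockname(sock, (struct sockaddr *)&local, &len); /* initiator side */

        len = sizeof(peer);
        kernel_getpeername(sock, (struct sockaddr *)&peer, &len);  /* target side */
}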
Signed-off-by: Mike Christie Signed-off-by: Christoph Hellwig --- drivers/scsi/iscsi_tcp.c | 10 ++++++++-- drivers/scsi/libiscsi.c | 1 + 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index a669f2d11c31..427af0f24b0f 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -726,13 +726,18 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn, switch(param) { case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_LOCAL_PORT: spin_lock_bh(&conn->session->frwd_lock); if (!tcp_sw_conn || !tcp_sw_conn->sock) { spin_unlock_bh(&conn->session->frwd_lock); return -ENOTCONN; } - rc = kernel_getpeername(tcp_sw_conn->sock, - (struct sockaddr *)&addr, &len); + if (param == ISCSI_PARAM_LOCAL_PORT) + rc = kernel_getsockname(tcp_sw_conn->sock, + (struct sockaddr *)&addr, &len); + else + rc = kernel_getpeername(tcp_sw_conn->sock, + (struct sockaddr *)&addr, &len); spin_unlock_bh(&conn->session->frwd_lock); if (rc) return rc; @@ -895,6 +900,7 @@ static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param) case ISCSI_PARAM_DATADGST_EN: case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_LOCAL_PORT: case ISCSI_PARAM_EXP_STATSN: case ISCSI_PARAM_PERSISTENT_ADDRESS: case ISCSI_PARAM_PERSISTENT_PORT: diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 191b59793519..0d8bc6c66650 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -3505,6 +3505,7 @@ int iscsi_conn_get_addr_param(struct sockaddr_storage *addr, len = sprintf(buf, "%pI6\n", &sin6->sin6_addr); break; case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_LOCAL_PORT: if (sin) len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port)); else -- cgit From a41a9ad3bbf61fae0b6bfb232153da60d14fdbd9 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Mon, 29 Sep 2014 13:55:41 -0500 Subject: be2iscsi: check ip buffer before copying Dan Carpenter found a issue where be2iscsi would copy the ip from userspace to the driver buffer before checking the len of the data being copied: http://marc.info/?l=linux-scsi&m=140982651504251&w=2 This patch just has us only copy what we the driver buffer can support. 
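The underlying rule is that a copy into a fixed-size record must be bounded by the destination field, not by a caller-supplied length; the patch applies it by passing sizeof() of each destination field to memcpy(). As a generic sketch of the defensive pattern (field names hypothetical; this version additionally clamps to the source length and zero-fills, which goes slightly beyond the one-line change in the patch):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

struct ip_record {
        u8 addr[16];            /* fixed-size field in a firmware request */
};

static void set_ip_addr(struct ip_record *rec, const u8 *src, size_t src_len)
{
        size_t n = min_t(size_t, src_len, sizeof(rec->addr));

        memset(rec->addr, 0, sizeof(rec->addr));        /* no stale bytes */
        memcpy(rec->addr, src, n);                      /* never past the field */
}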
Cc: Tested-by: John Soni Jose Signed-off-by: Mike Christie Signed-off-by: Christoph Hellwig --- drivers/scsi/be2iscsi/be_mgmt.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 8478506739fb..681d4e8f003a 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -943,17 +943,20 @@ mgmt_static_ip_modify(struct beiscsi_hba *phba, if (ip_action == IP_ACTION_ADD) { memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value, - ip_param->len); + sizeof(req->ip_params.ip_record.ip_addr.addr)); if (subnet_param) memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, - subnet_param->value, subnet_param->len); + subnet_param->value, + sizeof(req->ip_params.ip_record.ip_addr.subnet_mask)); } else { memcpy(req->ip_params.ip_record.ip_addr.addr, - if_info->ip_addr.addr, ip_param->len); + if_info->ip_addr.addr, + sizeof(req->ip_params.ip_record.ip_addr.addr)); memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, - if_info->ip_addr.subnet_mask, ip_param->len); + if_info->ip_addr.subnet_mask, + sizeof(req->ip_params.ip_record.ip_addr.subnet_mask)); } rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); @@ -981,7 +984,7 @@ static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr, req->action = gtway_action; req->ip_addr.ip_type = BE2_IPV4; - memcpy(req->ip_addr.addr, gt_addr, param_len); + memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr)); return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); } -- cgit From db525fce95f0ee39102f06f8599ced3f3f3af128 Mon Sep 17 00:00:00 2001 From: Douglas Gilbert Date: Sun, 31 Aug 2014 19:09:59 -0400 Subject: scsi_debug: deadlock between completions and surprise module removal A deadlock has been reported when the completion of SCSI commands (simulated by a timer) was surprised by a module removal. This patch removes one half of the offending locks around timer deletions. This fix is applied both to stop_all_queued() which is were the deadlock was discovered and stop_queued_cmnd() which has very similar logic. This patch should be applied both to the lk 3.17 tree and Christoph's drivers-for-3.18 tree. Tested-and-reported-by: Milan Broz Signed-off-by: Douglas Gilbert Signed-off-by: Christoph Hellwig --- drivers/scsi/scsi_debug.c | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 1c475e9a46ab..2b6d447ad6d6 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -2743,6 +2743,13 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd) if (test_bit(k, queued_in_use_bm)) { sqcp = &queued_arr[k]; if (cmnd == sqcp->a_cmnd) { + devip = (struct sdebug_dev_info *) + cmnd->device->hostdata; + if (devip) + atomic_dec(&devip->num_in_q); + sqcp->a_cmnd = NULL; + spin_unlock_irqrestore(&queued_arr_lock, + iflags); if (scsi_debug_ndelay > 0) { if (sqcp->sd_hrtp) hrtimer_cancel( @@ -2755,18 +2762,13 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd) if (sqcp->tletp) tasklet_kill(sqcp->tletp); } - __clear_bit(k, queued_in_use_bm); - devip = (struct sdebug_dev_info *) - cmnd->device->hostdata; - if (devip) - atomic_dec(&devip->num_in_q); - sqcp->a_cmnd = NULL; - break; + clear_bit(k, queued_in_use_bm); + return 1; } } } spin_unlock_irqrestore(&queued_arr_lock, iflags); - return (k < qmax) ? 
1 : 0; + return 0; } /* Deletes (stops) timers or tasklets of all queued commands */ @@ -2782,6 +2784,13 @@ static void stop_all_queued(void) if (test_bit(k, queued_in_use_bm)) { sqcp = &queued_arr[k]; if (sqcp->a_cmnd) { + devip = (struct sdebug_dev_info *) + sqcp->a_cmnd->device->hostdata; + if (devip) + atomic_dec(&devip->num_in_q); + sqcp->a_cmnd = NULL; + spin_unlock_irqrestore(&queued_arr_lock, + iflags); if (scsi_debug_ndelay > 0) { if (sqcp->sd_hrtp) hrtimer_cancel( @@ -2794,12 +2803,8 @@ static void stop_all_queued(void) if (sqcp->tletp) tasklet_kill(sqcp->tletp); } - __clear_bit(k, queued_in_use_bm); - devip = (struct sdebug_dev_info *) - sqcp->a_cmnd->device->hostdata; - if (devip) - atomic_dec(&devip->num_in_q); - sqcp->a_cmnd = NULL; + clear_bit(k, queued_in_use_bm); + spin_lock_irqsave(&queued_arr_lock, iflags); } } } -- cgit From 8e4a5da69c39788fabc9e7cf178eb62b6077c96b Mon Sep 17 00:00:00 2001 From: Sebastian Herbszt Date: Sat, 20 Sep 2014 13:37:55 +0200 Subject: scsi: fix comment in struct Scsi_Host definition Commit 1abf635 (scsi: use 64-bit value for 'max_luns') changed the order of Scsi_Host members. Update the comment to reflect this. Signed-off-by: Sebastian Herbszt Reviewed-by: Bart Van Assche Reviewed-by: Hannes Reinecke Signed-off-by: Christoph Hellwig --- include/scsi/scsi_host.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index d0f69a3210df..5e362489ee88 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -606,7 +606,7 @@ struct Scsi_Host { /* * These three parameters can be used to allow for wide scsi, * and for host adapters that support multiple busses - * The first two should be set to 1 more than the actual max id + * The last two should be set to 1 more than the actual max id * or lun (e.g. 8 for SCSI parallel systems). */ unsigned int max_channel; -- cgit From 45341ca3fcacc8720c425e757a627ef81b65b1ee Mon Sep 17 00:00:00 2001 From: Subhash Jadavani Date: Thu, 25 Sep 2014 15:32:19 +0300 Subject: scsi: fix the type for well known LUs Some devices may respond with wrong type for well-known logical units. This patch forces well-known type for devices which doesn't report it correct. Signed-off-by: Subhash Jadavani Signed-off-by: Sujit Reddy Thumma Signed-off-by: Dolev Raviv Signed-off-by: Christoph Hellwig --- drivers/scsi/scsi_scan.c | 13 +++++++++++++ include/scsi/scsi.h | 1 + 2 files changed, 14 insertions(+) diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index f84c40188478..dde0c542e0b1 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -815,6 +815,19 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, } else { sdev->type = (inq_result[0] & 0x1f); sdev->removable = (inq_result[1] & 0x80) >> 7; + + /* + * some devices may respond with wrong type for + * well-known logical units. Force well-known type + * to enumerate them correctly. 
+ */ + if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) { + sdev_printk(KERN_WARNING, sdev, + "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n", + __func__, sdev->type, (unsigned int)sdev->lun); + sdev->type = TYPE_WLUN; + } + } if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) { diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 261e708010da..d17178e6fcdd 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -333,6 +333,7 @@ static inline int scsi_status_is_good(int status) #define TYPE_RBC 0x0e #define TYPE_OSD 0x11 #define TYPE_ZBC 0x14 +#define TYPE_WLUN 0x1e /* well-known logical unit */ #define TYPE_NO_LUN 0x7f /* SCSI protocols; these are taken from SPC-3 section 7.5 */ -- cgit From 693ad5ba135d40b1379e40e928123681e2aa2c50 Mon Sep 17 00:00:00 2001 From: Subhash Jadavani Date: Thu, 25 Sep 2014 15:32:20 +0300 Subject: scsi: don't add scsi_device if its already visible If LLD has added scsi device (by calling scsi_add_device) before scheduling async scsi_scan_host then scsi_finish_async_scan() will end up calling scsi_sysfs_add_sdev for scsi device which was already added by LLD. This patch fixes this issue by skipping the call to scsi_sysfs_add_sdev() if it's already visible to rest of the kernel. Signed-off-by: Subhash Jadavani Signed-off-by: Dolev Raviv Reviewed-by: Hannes Reinecke Reviewed-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- drivers/scsi/scsi_scan.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index dde0c542e0b1..ba3f1e8d0d57 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1756,6 +1756,9 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost) /* target removed before the device could be added */ if (sdev->sdev_state == SDEV_DEL) continue; + /* If device is already visible, skip adding it to sysfs */ + if (sdev->is_visible) + continue; if (!scsi_host_scan_allowed(shost) || scsi_sysfs_add_sdev(sdev) != 0) __scsi_remove_device(sdev); -- cgit From 5c0c28a84af9f9b6061bb4855a30e13d289b4ae1 Mon Sep 17 00:00:00 2001 From: Sujit Reddy Thumma Date: Thu, 25 Sep 2014 15:32:21 +0300 Subject: ufs: Allow vendor specific initialization Some vendor specific controller versions might need to configure vendor specific - registers, clocks, voltage regulators etc. to initialize the host controller UTP layer and Uni-Pro stack. Provide some common initialization operations that can be used to configure vendor specifics. The methods can be extended in future, for example, for power mode transitions. The operations are vendor/board specific and hence determined with the help of compatible property in device tree. 
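The lookup itself is a standard of_match_node() on the controller node's compatible string, with the matched entry's .data pointing at the variant's ufs_hba_variant_ops. Entries added to the driver's ufs_of_match table would be wired up roughly as below; "vendor,ufs-v1" and the callbacks are placeholders for illustration, not an existing binding.

#include <linux/of.h>
#include "ufshcd.h"

static int vendor_ufs_init(struct ufs_hba *hba)
{
        /* claim vendor clocks/regulators, stash private data in hba->priv */
        return 0;
}

static int vendor_ufs_setup_clocks(struct ufs_hba *hba, bool on)
{
        /* gate or ungate vendor-specific clocks */
        return 0;
}

static struct ufs_hba_variant_ops vendor_ufs_ops = {
        .name         = "vendor-ufs",
        .init         = vendor_ufs_init,
        .setup_clocks = vendor_ufs_setup_clocks,
};

static const struct of_device_id ufs_of_match[] = {
        { .compatible = "jedec,ufs-1.1" },                      /* generic, no variant ops */
        { .compatible = "vendor,ufs-v1", .data = &vendor_ufs_ops },
        { },
};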
Signed-off-by: Sujit Reddy Thumma Signed-off-by: Dolev Raviv Signed-off-by: Christoph Hellwig --- drivers/scsi/ufs/ufshcd-pci.c | 8 +- drivers/scsi/ufs/ufshcd-pltfrm.c | 29 ++++++- drivers/scsi/ufs/ufshcd.c | 161 +++++++++++++++++++++++++++++++-------- drivers/scsi/ufs/ufshcd.h | 34 ++++++++- 4 files changed, 194 insertions(+), 38 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index afaabe2aeac8..7a6edbc55f92 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -164,7 +164,13 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mmio_base = pcim_iomap_table(pdev)[0]; - err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq); + err = ufshcd_alloc_host(&pdev->dev, &hba); + if (err) { + dev_err(&pdev->dev, "Allocation failed\n"); + return err; + } + + err = ufshcd_init(hba, mmio_base, pdev->irq); if (err) { dev_err(&pdev->dev, "Initialization failed\n"); return err; diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 5e4623225422..d727b1a83e95 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -35,9 +35,24 @@ #include #include +#include #include "ufshcd.h" +static const struct of_device_id ufs_of_match[]; +static struct ufs_hba_variant_ops *get_variant_ops(struct device *dev) +{ + if (dev->of_node) { + const struct of_device_id *match; + + match = of_match_node(ufs_of_match, dev->of_node); + if (match) + return (struct ufs_hba_variant_ops *)match->data; + } + + return NULL; +} + #ifdef CONFIG_PM /** * ufshcd_pltfrm_suspend - suspend power management function @@ -138,8 +153,8 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev) mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); mmio_base = devm_ioremap_resource(dev, mem_res); - if (IS_ERR(mmio_base)) { - err = PTR_ERR(mmio_base); + if (IS_ERR(*(void **)&mmio_base)) { + err = PTR_ERR(*(void **)&mmio_base); goto out; } @@ -150,10 +165,18 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev) goto out; } + err = ufshcd_alloc_host(dev, &hba); + if (err) { + dev_err(&pdev->dev, "Allocation failed\n"); + goto out; + } + + hba->vops = get_variant_ops(&pdev->dev); + pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - err = ufshcd_init(dev, &hba, mmio_base, irq); + err = ufshcd_init(hba, mmio_base, irq); if (err) { dev_err(dev, "Intialization failed\n"); goto out_disable_rpm; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index ba27215b8034..d0565b07dc8a 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3,6 +3,7 @@ * * This code is based on drivers/scsi/ufs/ufshcd.c * Copyright (C) 2011-2013 Samsung India Software Operations + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. * * Authors: * Santosh Yaraganavi @@ -31,6 +32,9 @@ * circumstances will the contributor of this Program be liable for * any damages of any kind arising from your use or distribution of * this program. + * + * The Linux Foundation chooses to take subject only to the GPLv2 + * license terms, and distributes only under these terms. 
*/ #include @@ -175,13 +179,14 @@ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) /** * ufshcd_is_device_present - Check if any device connected to * the host controller - * @reg_hcs - host controller status register value + * @hba: pointer to adapter instance * * Returns 1 if device present, 0 if no device detected */ -static inline int ufshcd_is_device_present(u32 reg_hcs) +static inline int ufshcd_is_device_present(struct ufs_hba *hba) { - return (DEVICE_PRESENT & reg_hcs) ? 1 : 0; + return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & + DEVICE_PRESENT) ? 1 : 0; } /** @@ -1798,11 +1803,10 @@ out: * @hba: per adapter instance * * To bring UFS host controller to operational state, - * 1. Check if device is present - * 2. Enable required interrupts - * 3. Configure interrupt aggregation - * 4. Program UTRL and UTMRL base addres - * 5. Configure run-stop-registers + * 1. Enable required interrupts + * 2. Configure interrupt aggregation + * 3. Program UTRL and UTMRL base addres + * 4. Configure run-stop-registers * * Returns 0 on success, non-zero value on failure */ @@ -1811,14 +1815,6 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) int err = 0; u32 reg; - /* check if device present */ - reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); - if (!ufshcd_is_device_present(reg)) { - dev_err(hba->dev, "cc: Device not present\n"); - err = -ENXIO; - goto out; - } - /* Enable required interrupts */ ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); @@ -1839,6 +1835,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) * UCRDY, UTMRLDY and UTRLRDY bits must be 1 * DEI, HEI bits must be 0 */ + reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); if (!(ufshcd_get_lists_status(reg))) { ufshcd_enable_run_stop_reg(hba); } else { @@ -1885,6 +1882,9 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) msleep(5); } + if (hba->vops && hba->vops->hce_enable_notify) + hba->vops->hce_enable_notify(hba, PRE_CHANGE); + /* start controller initialization sequence */ ufshcd_hba_start(hba); @@ -1912,6 +1912,10 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) } msleep(5); } + + if (hba->vops && hba->vops->hce_enable_notify) + hba->vops->hce_enable_notify(hba, POST_CHANGE); + return 0; } @@ -1928,12 +1932,28 @@ static int ufshcd_link_startup(struct ufs_hba *hba) /* enable UIC related interrupts */ ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); + if (hba->vops && hba->vops->link_startup_notify) + hba->vops->link_startup_notify(hba, PRE_CHANGE); + ret = ufshcd_dme_link_startup(hba); if (ret) goto out; - ret = ufshcd_make_hba_operational(hba); + /* check if device is detected by inter-connect layer */ + if (!ufshcd_is_device_present(hba)) { + dev_err(hba->dev, "%s: Device not present\n", __func__); + ret = -ENXIO; + goto out; + } + + /* Include any host controller configuration via UIC commands */ + if (hba->vops && hba->vops->link_startup_notify) { + ret = hba->vops->link_startup_notify(hba, POST_CHANGE); + if (ret) + goto out; + } + ret = ufshcd_make_hba_operational(hba); out: if (ret) dev_err(hba->dev, "link startup failed %d\n", ret); @@ -3173,6 +3193,61 @@ static struct scsi_host_template ufshcd_driver_template = { .can_queue = UFSHCD_CAN_QUEUE, }; +static int ufshcd_variant_hba_init(struct ufs_hba *hba) +{ + int err = 0; + + if (!hba->vops) + goto out; + + if (hba->vops->init) { + err = hba->vops->init(hba); + if (err) + goto out; + } + + if (hba->vops->setup_clocks) { + err = hba->vops->setup_clocks(hba, true); + if (err) + goto out_exit; + } + + if (hba->vops->setup_regulators) { + err = 
hba->vops->setup_regulators(hba, true); + if (err) + goto out_clks; + } + + goto out; + +out_clks: + if (hba->vops->setup_clocks) + hba->vops->setup_clocks(hba, false); +out_exit: + if (hba->vops->exit) + hba->vops->exit(hba); +out: + if (err) + dev_err(hba->dev, "%s: variant %s init failed err %d\n", + __func__, hba->vops ? hba->vops->name : "", err); + return err; +} + +static void ufshcd_variant_hba_exit(struct ufs_hba *hba) +{ + if (!hba->vops) + return; + + if (hba->vops->setup_clocks) + hba->vops->setup_clocks(hba, false); + + if (hba->vops->setup_regulators) + hba->vops->setup_regulators(hba, false); + + if (hba->vops->exit) + hba->vops->exit(hba); +} + /** * ufshcd_suspend - suspend power management function * @hba: per adapter instance @@ -3257,6 +3332,8 @@ void ufshcd_remove(struct ufs_hba *hba) ufshcd_hba_stop(hba); scsi_host_put(hba->host); + + ufshcd_variant_hba_exit(hba); } EXPORT_SYMBOL_GPL(ufshcd_remove); @@ -3277,19 +3354,16 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba) } /** - * ufshcd_init - Driver initialization routine + * ufshcd_alloc_host - allocate Host Bus Adapter (HBA) * @dev: pointer to device handle * @hba_handle: driver private handle - * @mmio_base: base register address - * @irq: Interrupt line of device * Returns 0 on success, non-zero value on failure */ -int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, - void __iomem *mmio_base, unsigned int irq) +int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) { struct Scsi_Host *host; struct ufs_hba *hba; - int err; + int err = 0; if (!dev) { dev_err(dev, @@ -3298,13 +3372,6 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, goto out_error; } - if (!mmio_base) { - dev_err(dev, - "Invalid memory reference for mmio_base is NULL\n"); - err = -ENODEV; - goto out_error; - } - host = scsi_host_alloc(&ufshcd_driver_template, sizeof(struct ufs_hba)); if (!host) { @@ -3315,9 +3382,40 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, hba = shost_priv(host); hba->host = host; hba->dev = dev; + *hba_handle = hba; + +out_error: + return err; +} +EXPORT_SYMBOL(ufshcd_alloc_host); + +/** + * ufshcd_init - Driver initialization routine + * @hba: per-adapter instance + * @mmio_base: base register address + * @irq: Interrupt line of device + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) +{ + int err; + struct Scsi_Host *host = hba->host; + struct device *dev = hba->dev; + + if (!mmio_base) { + dev_err(hba->dev, + "Invalid memory reference for mmio_base is NULL\n"); + err = -ENODEV; + goto out_error; + } + hba->mmio_base = mmio_base; hba->irq = irq; + err = ufshcd_variant_hba_init(hba); + if (err) + goto out_error; + /* Read capabilities registers */ ufshcd_hba_capabilities(hba); @@ -3395,8 +3493,6 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, goto out_remove_scsi_host; } - *hba_handle = hba; - /* Hold auto suspend until async scan completes */ pm_runtime_get_sync(dev); @@ -3408,6 +3504,7 @@ out_remove_scsi_host: scsi_remove_host(hba->host); out_disable: scsi_host_put(host); + ufshcd_variant_hba_exit(hba); out_error: return err; } diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index acf318e338ed..8c6bec05283e 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -68,6 +68,8 @@ #define UFSHCD "ufshcd" #define UFSHCD_DRIVER_VERSION "0.2" +struct ufs_hba; + enum dev_cmd_type { DEV_CMD_TYPE_NOP = 0x0, 
DEV_CMD_TYPE_QUERY = 0x1, @@ -152,6 +154,30 @@ struct ufs_dev_cmd { struct ufs_query query; }; +#define PRE_CHANGE 0 +#define POST_CHANGE 1 +/** + * struct ufs_hba_variant_ops - variant specific callbacks + * @name: variant name + * @init: called when the driver is initialized + * @exit: called to cleanup everything done in init + * @setup_clocks: called before touching any of the controller registers + * @setup_regulators: called before accessing the host controller + * @hce_enable_notify: called before and after HCE enable bit is set to allow + * variant specific Uni-Pro initialization. + * @link_startup_notify: called before and after Link startup is carried out + * to allow variant specific Uni-Pro initialization. + */ +struct ufs_hba_variant_ops { + const char *name; + int (*init)(struct ufs_hba *); + void (*exit)(struct ufs_hba *); + int (*setup_clocks)(struct ufs_hba *, bool); + int (*setup_regulators)(struct ufs_hba *, bool); + int (*hce_enable_notify)(struct ufs_hba *, bool); + int (*link_startup_notify)(struct ufs_hba *, bool); +}; + /** * struct ufs_hba - per adapter private structure * @mmio_base: UFSHCI base register address @@ -171,6 +197,8 @@ struct ufs_dev_cmd { * @nutrs: Transfer Request Queue depth supported by controller * @nutmrs: Task Management Queue depth supported by controller * @ufs_version: UFS Version to which controller complies + * @vops: pointer to variant specific operations + * @priv: pointer to variant specific private data * @irq: Irq number of the controller * @active_uic_cmd: handle of active UIC command * @uic_cmd_mutex: mutex for uic command @@ -218,6 +246,8 @@ struct ufs_hba { int nutrs; int nutmrs; u32 ufs_version; + struct ufs_hba_variant_ops *vops; + void *priv; unsigned int irq; struct uic_command *active_uic_cmd; @@ -256,8 +286,8 @@ struct ufs_hba { #define ufshcd_readl(hba, reg) \ readl((hba)->mmio_base + (reg)) -int ufshcd_init(struct device *, struct ufs_hba ** , void __iomem * , - unsigned int); +int ufshcd_alloc_host(struct device *, struct ufs_hba **); +int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int); void ufshcd_remove(struct ufs_hba *); /** -- cgit From aa497613093412ee26ef4bfa4ffec8391553dfca Mon Sep 17 00:00:00 2001 From: Sujit Reddy Thumma Date: Thu, 25 Sep 2014 15:32:22 +0300 Subject: ufs: Add regulator enable support UFS devices are powered by at most three external power supplies - - VCC - The flash memory core power supply, 2.7V to 3.6V or 1.70V to 1.95V - VCCQ - The controller and I/O power supply, 1.1V to 1.3V - VCCQ2 - Secondary controller and/or I/O power supply, 1.65V to 1.95V For some devices VCCQ or VCCQ2 are optional as they can be generated using internal LDO inside the UFS device. Add DT bindings for voltage regulators that can be controlled from host driver. 
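[ Illustrative consumer-side sketch, not part of this patch: it shows the usual regulator-framework pattern that the new "vcc-supply"/"vccq-supply"/"vccq2-supply" phandles feed into. The function name is made up for the example, the 1.70-1.95V window stands in for the vcc-supply-1p8 case, and error handling is kept to the minimum. ]

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_enable_vcc(struct device *dev)
{
	struct regulator *vcc;
	int ret;

	/* "vcc" resolves against the "vcc-supply" property of dev's DT node */
	vcc = devm_regulator_get(dev, "vcc");
	if (IS_ERR(vcc))
		return PTR_ERR(vcc);

	/* Constrain VCC to the embedded-device low range (1.70V to 1.95V) */
	ret = regulator_set_voltage(vcc, 1700000, 1950000);
	if (ret)
		return ret;

	return regulator_enable(vcc);
}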
Signed-off-by: Sujit Reddy Thumma Signed-off-by: Dolev Raviv Signed-off-by: Christoph Hellwig --- .../devicetree/bindings/ufs/ufshcd-pltfrm.txt | 24 +++ drivers/scsi/ufs/ufs.h | 25 +++ drivers/scsi/ufs/ufshcd-pltfrm.c | 100 +++++++++++ drivers/scsi/ufs/ufshcd.c | 193 ++++++++++++++++++++- drivers/scsi/ufs/ufshcd.h | 3 + 5 files changed, 342 insertions(+), 3 deletions(-) diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt index 20468b2a7516..65e31173dccd 100644 --- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt +++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt @@ -8,9 +8,33 @@ Required properties: - interrupts : - reg : +Optional properties: +- vcc-supply : phandle to VCC supply regulator node +- vccq-supply : phandle to VCCQ supply regulator node +- vccq2-supply : phandle to VCCQ2 supply regulator node +- vcc-supply-1p8 : For embedded UFS devices, valid VCC range is 1.7-1.95V + or 2.7-3.6V. This boolean property when set, specifies + to use low voltage range of 1.7-1.95V. Note for external + UFS cards this property is invalid and valid VCC range is + always 2.7-3.6V. +- vcc-max-microamp : specifies max. load that can be drawn from vcc supply +- vccq-max-microamp : specifies max. load that can be drawn from vccq supply +- vccq2-max-microamp : specifies max. load that can be drawn from vccq2 supply + +Note: If above properties are not defined it can be assumed that the supply +regulators are always on. + Example: ufshc@0xfc598000 { compatible = "jedec,ufs-1.1"; reg = <0xfc598000 0x800>; interrupts = <0 28 0>; + + vcc-supply = <&xxx_reg1>; + vcc-supply-1p8; + vccq-supply = <&xxx_reg2>; + vccq2-supply = <&xxx_reg3>; + vcc-max-microamp = 500000; + vccq-max-microamp = 200000; + vccq2-max-microamp = 200000; }; diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index fafcf5e354c6..729ce7d62d1e 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -362,4 +362,29 @@ struct ufs_query_res { struct utp_upiu_query upiu_res; }; +#define UFS_VREG_VCC_MIN_UV 2700000 /* uV */ +#define UFS_VREG_VCC_MAX_UV 3600000 /* uV */ +#define UFS_VREG_VCC_1P8_MIN_UV 1700000 /* uV */ +#define UFS_VREG_VCC_1P8_MAX_UV 1950000 /* uV */ +#define UFS_VREG_VCCQ_MIN_UV 1100000 /* uV */ +#define UFS_VREG_VCCQ_MAX_UV 1300000 /* uV */ +#define UFS_VREG_VCCQ2_MIN_UV 1650000 /* uV */ +#define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */ + +struct ufs_vreg { + struct regulator *reg; + const char *name; + bool enabled; + int min_uV; + int max_uV; + int min_uA; + int max_uA; +}; + +struct ufs_vreg_info { + struct ufs_vreg *vcc; + struct ufs_vreg *vccq; + struct ufs_vreg *vccq2; +}; + #endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index d727b1a83e95..51e47c429a1f 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -53,6 +53,99 @@ static struct ufs_hba_variant_ops *get_variant_ops(struct device *dev) return NULL; } +#define MAX_PROP_SIZE 32 +static int ufshcd_populate_vreg(struct device *dev, const char *name, + struct ufs_vreg **out_vreg) +{ + int ret = 0; + char prop_name[MAX_PROP_SIZE]; + struct ufs_vreg *vreg = NULL; + struct device_node *np = dev->of_node; + + if (!np) { + dev_err(dev, "%s: non DT initialization\n", __func__); + goto out; + } + + snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name); + if (!of_parse_phandle(np, prop_name, 0)) { + dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n", + 
__func__, prop_name); + goto out; + } + + vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL); + if (!vreg) { + dev_err(dev, "No memory for %s regulator\n", name); + goto out; + } + + vreg->name = kstrdup(name, GFP_KERNEL); + + snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name); + ret = of_property_read_u32(np, prop_name, &vreg->max_uA); + if (ret) { + dev_err(dev, "%s: unable to find %s err %d\n", + __func__, prop_name, ret); + goto out_free; + } + + vreg->min_uA = 0; + if (!strcmp(name, "vcc")) { + if (of_property_read_bool(np, "vcc-supply-1p8")) { + vreg->min_uV = UFS_VREG_VCC_1P8_MIN_UV; + vreg->max_uV = UFS_VREG_VCC_1P8_MAX_UV; + } else { + vreg->min_uV = UFS_VREG_VCC_MIN_UV; + vreg->max_uV = UFS_VREG_VCC_MAX_UV; + } + } else if (!strcmp(name, "vccq")) { + vreg->min_uV = UFS_VREG_VCCQ_MIN_UV; + vreg->max_uV = UFS_VREG_VCCQ_MAX_UV; + } else if (!strcmp(name, "vccq2")) { + vreg->min_uV = UFS_VREG_VCCQ2_MIN_UV; + vreg->max_uV = UFS_VREG_VCCQ2_MAX_UV; + } + + goto out; + +out_free: + devm_kfree(dev, vreg); + vreg = NULL; +out: + if (!ret) + *out_vreg = vreg; + return ret; +} + +/** + * ufshcd_parse_regulator_info - get regulator info from device tree + * @hba: per adapter instance + * + * Get regulator info from device tree for vcc, vccq, vccq2 power supplies. + * If any of the supplies are not defined it is assumed that they are always-on + * and hence return zero. If the property is defined but parsing is failed + * then return corresponding error. + */ +static int ufshcd_parse_regulator_info(struct ufs_hba *hba) +{ + int err; + struct device *dev = hba->dev; + struct ufs_vreg_info *info = &hba->vreg_info; + + err = ufshcd_populate_vreg(dev, "vcc", &info->vcc); + if (err) + goto out; + + err = ufshcd_populate_vreg(dev, "vccq", &info->vccq); + if (err) + goto out; + + err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2); +out: + return err; +} + #ifdef CONFIG_PM /** * ufshcd_pltfrm_suspend - suspend power management function @@ -173,6 +266,13 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev) hba->vops = get_variant_ops(&pdev->dev); + err = ufshcd_parse_regulator_info(hba); + if (err) { + dev_err(&pdev->dev, "%s: regulator init failed %d\n", + __func__, err); + goto out; + } + pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index d0565b07dc8a..ef8519e70e97 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -68,6 +68,16 @@ /* Interrupt aggregation default timeout, unit: 40us */ #define INT_AGGR_DEF_TO 0x02 +#define ufshcd_toggle_vreg(_dev, _vreg, _on) \ + ({ \ + int _ret; \ + if (_on) \ + _ret = ufshcd_enable_vreg(_dev, _vreg); \ + else \ + _ret = ufshcd_disable_vreg(_dev, _vreg); \ + _ret; \ + }) + enum { UFSHCD_MAX_CHANNEL = 0, UFSHCD_MAX_ID = 1, @@ -3193,6 +3203,153 @@ static struct scsi_host_template ufshcd_driver_template = { .can_queue = UFSHCD_CAN_QUEUE, }; +static int ufshcd_config_vreg(struct device *dev, + struct ufs_vreg *vreg, bool on) +{ + int ret = 0; + struct regulator *reg = vreg->reg; + const char *name = vreg->name; + int min_uV, uA_load; + + BUG_ON(!vreg); + + if (regulator_count_voltages(reg) > 0) { + min_uV = on ? vreg->min_uV : 0; + ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); + if (ret) { + dev_err(dev, "%s: %s set voltage failed, err=%d\n", + __func__, name, ret); + goto out; + } + + uA_load = on ? 
vreg->max_uA : 0; + ret = regulator_set_optimum_mode(reg, uA_load); + if (ret >= 0) { + /* + * regulator_set_optimum_mode() returns new regulator + * mode upon success. + */ + ret = 0; + } else { + dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n", + __func__, name, uA_load, ret); + goto out; + } + } +out: + return ret; +} + +static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg) +{ + int ret = 0; + + if (!vreg || vreg->enabled) + goto out; + + ret = ufshcd_config_vreg(dev, vreg, true); + if (!ret) + ret = regulator_enable(vreg->reg); + + if (!ret) + vreg->enabled = true; + else + dev_err(dev, "%s: %s enable failed, err=%d\n", + __func__, vreg->name, ret); +out: + return ret; +} + +static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg) +{ + int ret = 0; + + if (!vreg || !vreg->enabled) + goto out; + + ret = regulator_disable(vreg->reg); + + if (!ret) { + /* ignore errors on applying disable config */ + ufshcd_config_vreg(dev, vreg, false); + vreg->enabled = false; + } else { + dev_err(dev, "%s: %s disable failed, err=%d\n", + __func__, vreg->name, ret); + } +out: + return ret; +} + +static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) +{ + int ret = 0; + struct device *dev = hba->dev; + struct ufs_vreg_info *info = &hba->vreg_info; + + if (!info) + goto out; + + ret = ufshcd_toggle_vreg(dev, info->vcc, on); + if (ret) + goto out; + + ret = ufshcd_toggle_vreg(dev, info->vccq, on); + if (ret) + goto out; + + ret = ufshcd_toggle_vreg(dev, info->vccq2, on); + if (ret) + goto out; + +out: + if (ret) { + ufshcd_toggle_vreg(dev, info->vccq2, false); + ufshcd_toggle_vreg(dev, info->vccq, false); + ufshcd_toggle_vreg(dev, info->vcc, false); + } + return ret; +} + +static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg) +{ + int ret = 0; + + if (!vreg) + goto out; + + vreg->reg = devm_regulator_get(dev, vreg->name); + if (IS_ERR(vreg->reg)) { + ret = PTR_ERR(vreg->reg); + dev_err(dev, "%s: %s get failed, err=%d\n", + __func__, vreg->name, ret); + } +out: + return ret; +} + +static int ufshcd_init_vreg(struct ufs_hba *hba) +{ + int ret = 0; + struct device *dev = hba->dev; + struct ufs_vreg_info *info = &hba->vreg_info; + + if (!info) + goto out; + + ret = ufshcd_get_vreg(dev, info->vcc); + if (ret) + goto out; + + ret = ufshcd_get_vreg(dev, info->vccq); + if (ret) + goto out; + + ret = ufshcd_get_vreg(dev, info->vccq2); +out: + return ret; +} + static int ufshcd_variant_hba_init(struct ufs_hba *hba) { int err = 0; @@ -3248,6 +3405,36 @@ static void ufshcd_variant_hba_exit(struct ufs_hba *hba) hba->vops->exit(hba); } +static int ufshcd_hba_init(struct ufs_hba *hba) +{ + int err; + + err = ufshcd_init_vreg(hba); + if (err) + goto out; + + err = ufshcd_setup_vreg(hba, true); + if (err) + goto out; + + err = ufshcd_variant_hba_init(hba); + if (err) + goto out_disable_vreg; + + goto out; + +out_disable_vreg: + ufshcd_setup_vreg(hba, false); +out: + return err; +} + +static void ufshcd_hba_exit(struct ufs_hba *hba) +{ + ufshcd_variant_hba_exit(hba); + ufshcd_setup_vreg(hba, false); +} + /** * ufshcd_suspend - suspend power management function * @hba: per adapter instance @@ -3333,7 +3520,7 @@ void ufshcd_remove(struct ufs_hba *hba) scsi_host_put(hba->host); - ufshcd_variant_hba_exit(hba); + ufshcd_hba_exit(hba); } EXPORT_SYMBOL_GPL(ufshcd_remove); @@ -3412,7 +3599,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->mmio_base = mmio_base; hba->irq = irq; - err = ufshcd_variant_hba_init(hba); + 
err = ufshcd_hba_init(hba); if (err) goto out_error; @@ -3504,7 +3691,7 @@ out_remove_scsi_host: scsi_remove_host(hba->host); out_disable: scsi_host_put(host); - ufshcd_variant_hba_exit(hba); + ufshcd_hba_exit(hba); out_error: return err; } diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 8c6bec05283e..c0232f95f5b5 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -52,6 +52,7 @@ #include #include #include +#include #include #include @@ -219,6 +220,7 @@ struct ufs_hba_variant_ops { * @saved_uic_err: sticky UIC error mask * @dev_cmd: ufs device management command information * @auto_bkops_enabled: to track whether bkops is enabled in device + * @vreg_info: UFS device voltage regulator information */ struct ufs_hba { void __iomem *mmio_base; @@ -279,6 +281,7 @@ struct ufs_hba { struct ufs_dev_cmd dev_cmd; bool auto_bkops_enabled; + struct ufs_vreg_info vreg_info; }; #define ufshcd_writel(hba, val, reg) \ -- cgit From c6e79dacd86fd7ddd452fa52b3f4ca996db31e49 Mon Sep 17 00:00:00 2001 From: Sujit Reddy Thumma Date: Thu, 25 Sep 2014 15:32:23 +0300 Subject: ufs: Add clock initialization support Add generic clock initialization support for UFSHCD platform driver. The clock info is read from device tree using standard clock bindings. A generic max-clock-frequency-hz property is defined to save information on maximum operating clock frequency the h/w supports. Signed-off-by: Sujit Reddy Thumma Signed-off-by: Dolev Raviv Signed-off-by: Christoph Hellwig --- .../devicetree/bindings/ufs/ufshcd-pltfrm.txt | 15 +++- drivers/scsi/ufs/ufshcd-pci.c | 2 + drivers/scsi/ufs/ufshcd-pltfrm.c | 71 +++++++++++++++++ drivers/scsi/ufs/ufshcd.c | 89 +++++++++++++++++++++- drivers/scsi/ufs/ufshcd.h | 18 +++++ 5 files changed, 192 insertions(+), 3 deletions(-) diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt index 65e31173dccd..b0f791a8b28d 100644 --- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt +++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt @@ -21,8 +21,17 @@ Optional properties: - vccq-max-microamp : specifies max. load that can be drawn from vccq supply - vccq2-max-microamp : specifies max. load that can be drawn from vccq2 supply +- clocks : List of phandle and clock specifier pairs +- clock-names : List of clock input name strings sorted in the same + order as the clocks property. +- max-clock-frequency-hz : List of maximum operating frequency stored in the same + order as the clocks property. If this property is not + defined or a value in the array is "0" then it is assumed + that the frequency is set by the parent clock or a + fixed rate clock source. + Note: If above properties are not defined it can be assumed that the supply -regulators are always on. +regulators or clocks are always on. 
Example: ufshc@0xfc598000 { @@ -37,4 +46,8 @@ Example: vcc-max-microamp = 500000; vccq-max-microamp = 200000; vccq2-max-microamp = 200000; + + clocks = <&core 0>, <&ref 0>, <&iface 0>; + clock-names = "core_clk", "ref_clk", "iface_clk"; + max-clock-frequency-hz = <100000000 19200000 0>; }; diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index 7a6edbc55f92..2a26faa95b77 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -170,6 +170,8 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } + INIT_LIST_HEAD(&hba->clk_list_head); + err = ufshcd_init(hba, mmio_base, pdev->irq); if (err) { dev_err(&pdev->dev, "Initialization failed\n"); diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 51e47c429a1f..642d80fe1f80 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -53,6 +53,71 @@ static struct ufs_hba_variant_ops *get_variant_ops(struct device *dev) return NULL; } +static int ufshcd_parse_clock_info(struct ufs_hba *hba) +{ + int ret = 0; + int cnt; + int i; + struct device *dev = hba->dev; + struct device_node *np = dev->of_node; + char *name; + u32 *clkfreq = NULL; + struct ufs_clk_info *clki; + + if (!np) + goto out; + + INIT_LIST_HEAD(&hba->clk_list_head); + + cnt = of_property_count_strings(np, "clock-names"); + if (!cnt || (cnt == -EINVAL)) { + dev_info(dev, "%s: Unable to find clocks, assuming enabled\n", + __func__); + } else if (cnt < 0) { + dev_err(dev, "%s: count clock strings failed, err %d\n", + __func__, cnt); + ret = cnt; + } + + if (cnt <= 0) + goto out; + + clkfreq = kzalloc(cnt * sizeof(*clkfreq), GFP_KERNEL); + if (!clkfreq) { + ret = -ENOMEM; + dev_err(dev, "%s: memory alloc failed\n", __func__); + goto out; + } + + ret = of_property_read_u32_array(np, + "max-clock-frequency-hz", clkfreq, cnt); + if (ret && (ret != -EINVAL)) { + dev_err(dev, "%s: invalid max-clock-frequency-hz property, %d\n", + __func__, ret); + goto out; + } + + for (i = 0; i < cnt; i++) { + ret = of_property_read_string_index(np, + "clock-names", i, (const char **)&name); + if (ret) + goto out; + + clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL); + if (!clki) { + ret = -ENOMEM; + goto out; + } + + clki->max_freq = clkfreq[i]; + clki->name = kstrdup(name, GFP_KERNEL); + list_add_tail(&clki->list, &hba->clk_list_head); + } +out: + kfree(clkfreq); + return ret; +} + #define MAX_PROP_SIZE 32 static int ufshcd_populate_vreg(struct device *dev, const char *name, struct ufs_vreg **out_vreg) @@ -266,6 +331,12 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev) hba->vops = get_variant_ops(&pdev->dev); + err = ufshcd_parse_clock_info(hba); + if (err) { + dev_err(&pdev->dev, "%s: clock parse failed %d\n", + __func__, err); + goto out; + } err = ufshcd_parse_regulator_info(hba); if (err) { dev_err(&pdev->dev, "%s: regulator init failed %d\n", diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index ef8519e70e97..b03370292070 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3350,6 +3350,80 @@ out: return ret; } +static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) +{ + int ret = 0; + struct ufs_clk_info *clki; + struct list_head *head = &hba->clk_list_head; + + if (!head || list_empty(head)) + goto out; + + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk)) { + if (on && !clki->enabled) { + ret = clk_prepare_enable(clki->clk); + if (ret) { + dev_err(hba->dev, "%s: %s 
prepare enable failed, %d\n", + __func__, clki->name, ret); + goto out; + } + } else if (!on && clki->enabled) { + clk_disable_unprepare(clki->clk); + } + clki->enabled = on; + dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, + clki->name, on ? "en" : "dis"); + } + } +out: + if (ret) { + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) + clk_disable_unprepare(clki->clk); + } + } + return ret; +} + +static int ufshcd_init_clocks(struct ufs_hba *hba) +{ + int ret = 0; + struct ufs_clk_info *clki; + struct device *dev = hba->dev; + struct list_head *head = &hba->clk_list_head; + + if (!head || list_empty(head)) + goto out; + + list_for_each_entry(clki, head, list) { + if (!clki->name) + continue; + + clki->clk = devm_clk_get(dev, clki->name); + if (IS_ERR(clki->clk)) { + ret = PTR_ERR(clki->clk); + dev_err(dev, "%s: %s clk get failed, %d\n", + __func__, clki->name, ret); + goto out; + } + + if (clki->max_freq) { + ret = clk_set_rate(clki->clk, clki->max_freq); + if (ret) { + dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", + __func__, clki->name, + clki->max_freq, ret); + goto out; + } + } + dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__, + clki->name, clk_get_rate(clki->clk)); + } +out: + return ret; +} + static int ufshcd_variant_hba_init(struct ufs_hba *hba) { int err = 0; @@ -3409,14 +3483,22 @@ static int ufshcd_hba_init(struct ufs_hba *hba) { int err; - err = ufshcd_init_vreg(hba); + err = ufshcd_init_clocks(hba); if (err) goto out; - err = ufshcd_setup_vreg(hba, true); + err = ufshcd_setup_clocks(hba, true); if (err) goto out; + err = ufshcd_init_vreg(hba); + if (err) + goto out_disable_clks; + + err = ufshcd_setup_vreg(hba, true); + if (err) + goto out_disable_clks; + err = ufshcd_variant_hba_init(hba); if (err) goto out_disable_vreg; @@ -3425,6 +3507,8 @@ static int ufshcd_hba_init(struct ufs_hba *hba) out_disable_vreg: ufshcd_setup_vreg(hba, false); +out_disable_clks: + ufshcd_setup_clocks(hba, false); out: return err; } @@ -3433,6 +3517,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba) { ufshcd_variant_hba_exit(hba); ufshcd_setup_vreg(hba, false); + ufshcd_setup_clocks(hba, false); } /** diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index c0232f95f5b5..bc0f7ed02605 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -155,6 +155,22 @@ struct ufs_dev_cmd { struct ufs_query query; }; +/** + * struct ufs_clk_info - UFS clock related info + * @list: list headed by hba->clk_list_head + * @clk: clock node + * @name: clock name + * @max_freq: maximum frequency supported by the clock + * @enabled: variable to check against multiple enable/disable + */ +struct ufs_clk_info { + struct list_head list; + struct clk *clk; + const char *name; + u32 max_freq; + bool enabled; +}; + #define PRE_CHANGE 0 #define POST_CHANGE 1 /** @@ -221,6 +237,7 @@ struct ufs_hba_variant_ops { * @dev_cmd: ufs device management command information * @auto_bkops_enabled: to track whether bkops is enabled in device * @vreg_info: UFS device voltage regulator information + * @clk_list_head: UFS host controller clocks list node head */ struct ufs_hba { void __iomem *mmio_base; @@ -282,6 +299,7 @@ struct ufs_hba { bool auto_bkops_enabled; struct ufs_vreg_info vreg_info; + struct list_head clk_list_head; }; #define ufshcd_writel(hba, val, reg) \ -- cgit From 6a771a656041f404fae143e5d753d37f5c0688e7 Mon Sep 17 00:00:00 2001 From: Raviv Shvili Date: Thu, 25 Sep 2014 15:32:24 +0300 Subject: ufs: add voting support for 
host controller power Add the support for voting of the regulator powering the host controller logic. Signed-off-by: Raviv Shvili Signed-off-by: Subhash Jadavani Signed-off-by: Dolev Raviv Signed-off-by: Christoph Hellwig --- .../devicetree/bindings/ufs/ufshcd-pltfrm.txt | 4 +++ drivers/scsi/ufs/ufs.h | 1 + drivers/scsi/ufs/ufshcd-pltfrm.c | 9 +++++ drivers/scsi/ufs/ufshcd.c | 42 ++++++++++++++++++++-- 4 files changed, 54 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt index b0f791a8b28d..fb1234e0532c 100644 --- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt +++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt @@ -9,6 +9,7 @@ Required properties: - reg : Optional properties: +- vdd-hba-supply : phandle to UFS host controller supply regulator node - vcc-supply : phandle to VCC supply regulator node - vccq-supply : phandle to VCCQ supply regulator node - vccq2-supply : phandle to VCCQ2 supply regulator node @@ -20,6 +21,7 @@ Optional properties: - vcc-max-microamp : specifies max. load that can be drawn from vcc supply - vccq-max-microamp : specifies max. load that can be drawn from vccq supply - vccq2-max-microamp : specifies max. load that can be drawn from vccq2 supply +- -fixed-regulator : boolean property specifying that -supply is a fixed regulator - clocks : List of phandle and clock specifier pairs - clock-names : List of clock input name strings sorted in the same @@ -39,6 +41,8 @@ Example: reg = <0xfc598000 0x800>; interrupts = <0 28 0>; + vdd-hba-supply = <&xxx_reg0>; + vdd-hba-fixed-regulator; vcc-supply = <&xxx_reg1>; vcc-supply-1p8; vccq-supply = <&xxx_reg2>; diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 729ce7d62d1e..9bb691962c47 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -385,6 +385,7 @@ struct ufs_vreg_info { struct ufs_vreg *vcc; struct ufs_vreg *vccq; struct ufs_vreg *vccq2; + struct ufs_vreg *vdd_hba; }; #endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 642d80fe1f80..dde4e6e3be70 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -147,6 +147,11 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name, vreg->name = kstrdup(name, GFP_KERNEL); + /* if fixed regulator no need further initialization */ + snprintf(prop_name, MAX_PROP_SIZE, "%s-fixed-regulator", name); + if (of_property_read_bool(np, prop_name)) + goto out; + snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name); ret = of_property_read_u32(np, prop_name, &vreg->max_uA); if (ret) { @@ -198,6 +203,10 @@ static int ufshcd_parse_regulator_info(struct ufs_hba *hba) struct device *dev = hba->dev; struct ufs_vreg_info *info = &hba->vreg_info; + err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba); + if (err) + goto out; + err = ufshcd_populate_vreg(dev, "vcc", &info->vcc); if (err) goto out; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index b03370292070..26301b8325e8 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3311,6 +3311,16 @@ out: return ret; } +static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) +{ + struct ufs_vreg_info *info = &hba->vreg_info; + + if (info) + return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); + + return 0; +} + static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg) { int ret = 0; @@ -3350,6 +3360,16 @@ out: return 
ret; } +static int ufshcd_init_hba_vreg(struct ufs_hba *hba) +{ + struct ufs_vreg_info *info = &hba->vreg_info; + + if (info) + return ufshcd_get_vreg(hba->dev, info->vdd_hba); + + return 0; +} + static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) { int ret = 0; @@ -3483,14 +3503,29 @@ static int ufshcd_hba_init(struct ufs_hba *hba) { int err; - err = ufshcd_init_clocks(hba); + /* + * Handle host controller power separately from the UFS device power + * rails as it will help controlling the UFS host controller power + * collapse easily which is different than UFS device power collapse. + * Also, enable the host controller power before we go ahead with rest + * of the initialization here. + */ + err = ufshcd_init_hba_vreg(hba); if (err) goto out; - err = ufshcd_setup_clocks(hba, true); + err = ufshcd_setup_hba_vreg(hba, true); if (err) goto out; + err = ufshcd_init_clocks(hba); + if (err) + goto out_disable_hba_vreg; + + err = ufshcd_setup_clocks(hba, true); + if (err) + goto out_disable_hba_vreg; + err = ufshcd_init_vreg(hba); if (err) goto out_disable_clks; @@ -3509,6 +3544,8 @@ out_disable_vreg: ufshcd_setup_vreg(hba, false); out_disable_clks: ufshcd_setup_clocks(hba, false); +out_disable_hba_vreg: + ufshcd_setup_hba_vreg(hba, false); out: return err; } @@ -3518,6 +3555,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba) ufshcd_variant_hba_exit(hba); ufshcd_setup_vreg(hba, false); ufshcd_setup_clocks(hba, false); + ufshcd_setup_hba_vreg(hba, false); } /** -- cgit From da461cec9a1ff0a074cd1db47598ba36a1686ef4 Mon Sep 17 00:00:00 2001 From: Subhash Jadavani Date: Thu, 25 Sep 2014 15:32:25 +0300 Subject: ufs: refactor query descriptor API support Currently reading query descriptor is more tightened to each descriptor type. This patch generalize the approach and allows reading any parameter from any query descriptor. Signed-off-by: Subhash Jadavani Signed-off-by: Dolev Raviv Signed-off-by: Christoph Hellwig --- drivers/scsi/ufs/ufs.h | 25 ++++++- drivers/scsi/ufs/ufshcd.c | 167 ++++++++++++++++++++++++++++++++++++---------- 2 files changed, 153 insertions(+), 39 deletions(-) diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 9bb691962c47..f76a304e7408 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -50,6 +50,8 @@ cpu_to_be32((byte3 << 24) | (byte2 << 16) |\ (byte1 << 8) | (byte0)) +#define UFS_UPIU_MAX_GENERAL_LUN 8 + /* * UFS Protocol Information Unit related definitions */ @@ -129,10 +131,29 @@ enum desc_idn { QUERY_DESC_IDN_RFU_1 = 0x6, QUERY_DESC_IDN_GEOMETRY = 0x7, QUERY_DESC_IDN_POWER = 0x8, - QUERY_DESC_IDN_RFU_2 = 0x9, + QUERY_DESC_IDN_MAX, +}; + +enum desc_header_offset { + QUERY_DESC_LENGTH_OFFSET = 0x00, + QUERY_DESC_DESC_TYPE_OFFSET = 0x01, +}; + +enum ufs_desc_max_size { + QUERY_DESC_DEVICE_MAX_SIZE = 0x1F, + QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90, + QUERY_DESC_UNIT_MAX_SIZE = 0x23, + QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06, + /* + * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes + * of descriptor header. 
+ */ + QUERY_DESC_STRING_MAX_SIZE = 0xFE, + QUERY_DESC_GEOMETRY_MAZ_SIZE = 0x44, + QUERY_DESC_POWER_MAX_SIZE = 0x62, + QUERY_DESC_RFU_MAX_SIZE = 0x00, }; -#define UNIT_DESC_MAX_SIZE 0x22 /* Unit descriptor parameters offsets in bytes*/ enum unit_desc_param { UNIT_DESC_PARAM_LEN = 0x0, diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 26301b8325e8..3f2b30db80cc 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -78,6 +78,19 @@ _ret; \ }) +static u32 ufs_query_desc_max_size[] = { + QUERY_DESC_DEVICE_MAX_SIZE, + QUERY_DESC_CONFIGURAION_MAX_SIZE, + QUERY_DESC_UNIT_MAX_SIZE, + QUERY_DESC_RFU_MAX_SIZE, + QUERY_DESC_INTERCONNECT_MAX_SIZE, + QUERY_DESC_STRING_MAX_SIZE, + QUERY_DESC_RFU_MAX_SIZE, + QUERY_DESC_GEOMETRY_MAZ_SIZE, + QUERY_DESC_POWER_MAX_SIZE, + QUERY_DESC_RFU_MAX_SIZE, +}; + enum { UFSHCD_MAX_CHANNEL = 0, UFSHCD_MAX_ID = 1, @@ -124,8 +137,6 @@ static void ufshcd_tmc_handler(struct ufs_hba *hba); static void ufshcd_async_scan(void *data, async_cookie_t cookie); static int ufshcd_reset_and_restore(struct ufs_hba *hba); static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); -static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba, - struct scsi_device *sdev); /* * ufshcd_wait_for_register - wait for register value to change @@ -1392,6 +1403,115 @@ out: return err; } +/** + * ufshcd_read_desc_param - read the specified descriptor parameter + * @hba: Pointer to adapter instance + * @desc_id: descriptor idn value + * @desc_index: descriptor index + * @param_offset: offset of the parameter to read + * @param_read_buf: pointer to buffer where parameter would be read + * @param_size: sizeof(param_read_buf) + * + * Return 0 in case of success, non-zero otherwise + */ +static int ufshcd_read_desc_param(struct ufs_hba *hba, + enum desc_idn desc_id, + int desc_index, + u32 param_offset, + u8 *param_read_buf, + u32 param_size) +{ + int ret; + u8 *desc_buf; + u32 buff_len; + bool is_kmalloc = true; + + /* safety checks */ + if (desc_id >= QUERY_DESC_IDN_MAX) + return -EINVAL; + + buff_len = ufs_query_desc_max_size[desc_id]; + if ((param_offset + param_size) > buff_len) + return -EINVAL; + + if (!param_offset && (param_size == buff_len)) { + /* memory space already available to hold full descriptor */ + desc_buf = param_read_buf; + is_kmalloc = false; + } else { + /* allocate memory to hold full descriptor */ + desc_buf = kmalloc(buff_len, GFP_KERNEL); + if (!desc_buf) + return -ENOMEM; + } + + ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, + desc_id, desc_index, 0, desc_buf, + &buff_len); + + if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) || + (desc_buf[QUERY_DESC_LENGTH_OFFSET] != + ufs_query_desc_max_size[desc_id]) + || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) { + dev_err(hba->dev, "%s: Failed reading descriptor. 
desc_id %d param_offset %d buff_len %d ret %d", + __func__, desc_id, param_offset, buff_len, ret); + if (!ret) + ret = -EINVAL; + + goto out; + } + + if (is_kmalloc) + memcpy(param_read_buf, &desc_buf[param_offset], param_size); +out: + if (is_kmalloc) + kfree(desc_buf); + return ret; +} + +static inline int ufshcd_read_desc(struct ufs_hba *hba, + enum desc_idn desc_id, + int desc_index, + u8 *buf, + u32 size) +{ + return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); +} + +static inline int ufshcd_read_power_desc(struct ufs_hba *hba, + u8 *buf, + u32 size) +{ + return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); +} + +/** + * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter + * @hba: Pointer to adapter instance + * @lun: lun id + * @param_offset: offset of the parameter to read + * @param_read_buf: pointer to buffer where parameter would be read + * @param_size: sizeof(param_read_buf) + * + * Return 0 in case of success, non-zero otherwise + */ +static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, + int lun, + enum unit_desc_param param_offset, + u8 *param_read_buf, + u32 param_size) +{ + /* + * Unit descriptors are only available for general purpose LUs (LUN id + * from 0 to 7) and RPMB Well known LU. + */ + if (lun >= UFS_UPIU_MAX_GENERAL_LUN) + return -EOPNOTSUPP; + + return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, + param_offset, param_read_buf, param_size); +} + /** * ufshcd_memory_alloc - allocate memory for host memory space data structures * @hba: per adapter instance @@ -2011,7 +2131,8 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba) static int ufshcd_slave_alloc(struct scsi_device *sdev) { struct ufs_hba *hba; - int lun_qdepth; + u8 lun_qdepth; + int ret; hba = shost_priv(sdev->host); sdev->tagged_supported = 1; @@ -2026,8 +2147,12 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) /* REPORT SUPPORTED OPERATION CODES is not supported */ sdev->no_report_opcodes = 1; - lun_qdepth = ufshcd_read_sdev_qdepth(hba, sdev); - if (lun_qdepth <= 0) + ret = ufshcd_read_unit_desc_param(hba, + sdev->lun, + UNIT_DESC_PARAM_LU_Q_DEPTH, + &lun_qdepth, + sizeof(lun_qdepth)); + if (!ret || !lun_qdepth) /* eventually, we can figure out the real queue depth */ lun_qdepth = hba->nutrs; else @@ -3117,38 +3242,6 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) return err; } -/** - * ufshcd_read_sdev_qdepth - read the lun command queue depth - * @hba: Pointer to adapter instance - * @sdev: pointer to SCSI device - * - * Return in case of success the lun's queue depth else error. - */ -static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba, - struct scsi_device *sdev) -{ - int ret; - int buff_len = UNIT_DESC_MAX_SIZE; - u8 desc_buf[UNIT_DESC_MAX_SIZE]; - - ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, - QUERY_DESC_IDN_UNIT, sdev->lun, 0, desc_buf, &buff_len); - - if (ret || (buff_len < UNIT_DESC_PARAM_LU_Q_DEPTH)) { - dev_err(hba->dev, - "%s:Failed reading unit descriptor. 
len = %d ret = %d" - , __func__, buff_len, ret); - if (!ret) - ret = -EINVAL; - - goto out; - } - - ret = desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH] & 0xFF; -out: - return ret; -} - /** * ufshcd_async_scan - asynchronous execution for link startup * @data: data pointer to pass to this function -- cgit From 1d337ec2f35e69a046dab0cc77e64e68d1cdcd8b Mon Sep 17 00:00:00 2001 From: Sujit Reddy Thumma Date: Thu, 25 Sep 2014 15:32:26 +0300 Subject: ufs: improve init sequence In ->hce_enable_notify() callback the vendor specific initialization may carry out additional DME configuration using UIC commands and hence the UIC command completion interrupt enable bit should be set before the post reset notification. Add retries if the link-startup fails. This is required since due to hardware timing issues, the Uni-Pro link-startup might fail. The UFS HCI recovery procedure contradicts the Uni-Pro sequence. The UFS HCI specifies to resend DME_LINKSTARTUP command after IS.ULLS (link-lost interrupt) is received. The Uni-Pro specifies that if link-startup fails the link is in "down" state. The link-lost is indicated to the DME user only when the link is up. Hence, the UFS HCI recovery procedure of waiting for IS.ULLS and retrying link-startup may not work properly. At the end, if detection fails, power off (disable clocks, regulators, phy) if the UFS device detection fails. This saves power while UFS device is not embedded into the system. Signed-off-by: Sujit Reddy Thumma Signed-off-by: Dolev Raviv Signed-off-by: Christoph Hellwig --- drivers/scsi/ufs/ufshcd.c | 101 +++++++++++++++++++++++++++++++++------------- drivers/scsi/ufs/ufshcd.h | 2 + 2 files changed, 75 insertions(+), 28 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 3f2b30db80cc..af29d4c0a416 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -62,6 +62,12 @@ /* Task management command timeout */ #define TM_CMD_TIMEOUT 100 /* msecs */ +/* maximum number of link-startup retries */ +#define DME_LINKSTARTUP_RETRIES 3 + +/* maximum number of reset retries before giving up */ +#define MAX_HOST_RESET_RETRIES 5 + /* Expose the flag value from utp_upiu_query.value */ #define MASK_QUERY_UPIU_FLAG_LOC 0xFF @@ -137,6 +143,8 @@ static void ufshcd_tmc_handler(struct ufs_hba *hba); static void ufshcd_async_scan(void *data, async_cookie_t cookie); static int ufshcd_reset_and_restore(struct ufs_hba *hba); static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); +static void ufshcd_hba_exit(struct ufs_hba *hba); +static int ufshcd_probe_hba(struct ufs_hba *hba); /* * ufshcd_wait_for_register - wait for register value to change @@ -2043,6 +2051,9 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) msleep(5); } + /* enable UIC related interrupts */ + ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); + if (hba->vops && hba->vops->hce_enable_notify) hba->vops->hce_enable_notify(hba, POST_CHANGE); @@ -2058,23 +2069,33 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) static int ufshcd_link_startup(struct ufs_hba *hba) { int ret; + int retries = DME_LINKSTARTUP_RETRIES; - /* enable UIC related interrupts */ - ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); + do { + if (hba->vops && hba->vops->link_startup_notify) + hba->vops->link_startup_notify(hba, PRE_CHANGE); - if (hba->vops && hba->vops->link_startup_notify) - hba->vops->link_startup_notify(hba, PRE_CHANGE); + ret = ufshcd_dme_link_startup(hba); - ret = ufshcd_dme_link_startup(hba); - if (ret) - goto out; + /* check if device is detected by inter-connect 
layer */ + if (!ret && !ufshcd_is_device_present(hba)) { + dev_err(hba->dev, "%s: Device not present\n", __func__); + ret = -ENXIO; + goto out; + } - /* check if device is detected by inter-connect layer */ - if (!ufshcd_is_device_present(hba)) { - dev_err(hba->dev, "%s: Device not present\n", __func__); - ret = -ENXIO; + /* + * DME link lost indication is only received when link is up, + * but we can't be sure if the link is up until link startup + * succeeds. So reset the local Uni-Pro and try again. + */ + if (ret && ufshcd_hba_enable(hba)) + goto out; + } while (ret && retries--); + + if (ret) + /* failed to get the link up... retire */ goto out; - } /* Include any host controller configuration via UIC commands */ if (hba->vops && hba->vops->link_startup_notify) { @@ -3139,7 +3160,6 @@ out: static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) { int err; - async_cookie_t cookie; unsigned long flags; /* Reset the host controller */ @@ -3152,10 +3172,9 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) goto out; /* Establish the link again and restore the device */ - cookie = async_schedule(ufshcd_async_scan, hba); - /* wait for async scan to be completed */ - async_synchronize_cookie(++cookie); - if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) + err = ufshcd_probe_hba(hba); + + if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) err = -EIO; out: if (err) @@ -3177,8 +3196,11 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba) { int err = 0; unsigned long flags; + int retries = MAX_HOST_RESET_RETRIES; - err = ufshcd_host_reset_and_restore(hba); + do { + err = ufshcd_host_reset_and_restore(hba); + } while (err && --retries); /* * After reset the door-bell might be cleared, complete @@ -3243,13 +3265,13 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) } /** - * ufshcd_async_scan - asynchronous execution for link startup - * @data: data pointer to pass to this function - * @cookie: cookie data + * ufshcd_probe_hba - probe hba to detect device and initialize + * @hba: per-adapter instance + * + * Execute link-startup and verify device initialization */ -static void ufshcd_async_scan(void *data, async_cookie_t cookie) +static int ufshcd_probe_hba(struct ufs_hba *hba) { - struct ufs_hba *hba = (struct ufs_hba *)data; int ret; ret = ufshcd_link_startup(hba); @@ -3275,7 +3297,26 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie) pm_runtime_put_sync(hba->dev); } out: - return; + /* + * If we failed to initialize the device or the device is not + * present, turn off the power/clocks etc. 
+ */ + if (ret && !ufshcd_eh_in_progress(hba)) + ufshcd_hba_exit(hba); + + return ret; +} + +/** + * ufshcd_async_scan - asynchronous execution for probing hba + * @data: data pointer to pass to this function + * @cookie: cookie data + */ +static void ufshcd_async_scan(void *data, async_cookie_t cookie) +{ + struct ufs_hba *hba = (struct ufs_hba *)data; + + ufshcd_probe_hba(hba); } static struct scsi_host_template ufshcd_driver_template = { @@ -3631,6 +3672,7 @@ static int ufshcd_hba_init(struct ufs_hba *hba) if (err) goto out_disable_vreg; + hba->is_powered = true; goto out; out_disable_vreg: @@ -3645,10 +3687,13 @@ out: static void ufshcd_hba_exit(struct ufs_hba *hba) { - ufshcd_variant_hba_exit(hba); - ufshcd_setup_vreg(hba, false); - ufshcd_setup_clocks(hba, false); - ufshcd_setup_hba_vreg(hba, false); + if (hba->is_powered) { + ufshcd_variant_hba_exit(hba); + ufshcd_setup_vreg(hba, false); + ufshcd_setup_clocks(hba, false); + ufshcd_setup_hba_vreg(hba, false); + hba->is_powered = false; + } } /** diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index bc0f7ed02605..eddb3f3b4139 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -228,6 +228,7 @@ struct ufs_hba_variant_ops { * @eh_flags: Error handling flags * @intr_mask: Interrupt Mask Bits * @ee_ctrl_mask: Exception event control mask + * @is_powered: flag to check if HBA is powered * @eh_work: Worker to handle UFS errors that require s/w attention * @eeh_work: Worker to handle exception events * @errors: HBA errors @@ -283,6 +284,7 @@ struct ufs_hba { u32 eh_flags; u32 intr_mask; u16 ee_ctrl_mask; + bool is_powered; /* Work Queues */ struct work_struct eh_work; -- cgit From 3a4bf06d1f6d7de17528b962bc91fcbb2e568b4e Mon Sep 17 00:00:00 2001 From: Yaniv Gardi Date: Thu, 25 Sep 2014 15:32:27 +0300 Subject: ufs: Active Power Mode - configuring bActiveICCLevel The maximum power consumption in active is determined by bActiveICCLevel. The configuration is done by reading max current supported by the regulators connected to VCC, VCCQ and VCCQ2 rails on the boards, and reading the current consumption levels from the device for each rails (vcc/vccq/vccq2) using power descriptor. We configure the bActiveICCLevel attribute, with the max value that correspond to the minimum-of(VCC-current-level,VCCQ-current-level, VCCQ2-current-level). In order to minimize resume latency, pre-fetch icc levels and reference clock during initialization and avoid reading them each link startup during resume. 
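[ For reference, a minimal standalone sketch of the selection rule above, not the driver code itself. It assumes the per-level current draw for each rail has already been decoded from the power descriptor into plain microamps and that there are 16 active ICC levels per rail; function and parameter names are illustrative. ]

#define NUM_ICC_LEVELS	16

/* Highest level whose draw still fits within the regulator budget. */
int max_fitting_level(const int draw_uA[NUM_ICC_LEVELS], int budget_uA)
{
	int lvl;

	for (lvl = NUM_ICC_LEVELS - 1; lvl > 0; lvl--)
		if (draw_uA[lvl] <= budget_uA)
			break;
	return lvl;
}

/* bActiveICCLevel = minimum over the three rails of their max fitting level */
int active_icc_level(const int vcc[NUM_ICC_LEVELS], int vcc_uA,
		     const int vccq[NUM_ICC_LEVELS], int vccq_uA,
		     const int vccq2[NUM_ICC_LEVELS], int vccq2_uA)
{
	int lvl, tmp;

	lvl = max_fitting_level(vcc, vcc_uA);
	tmp = max_fitting_level(vccq, vccq_uA);
	if (tmp < lvl)
		lvl = tmp;
	tmp = max_fitting_level(vccq2, vccq2_uA);
	if (tmp < lvl)
		lvl = tmp;
	return lvl;
}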
Signed-off-by: Raviv Shvili Signed-off-by: Yaniv Gardi Signed-off-by: Dolev Raviv Signed-off-by: Christoph Hellwig --- drivers/scsi/ufs/ufs.h | 26 ++++++++++ drivers/scsi/ufs/ufshcd.c | 126 ++++++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/ufs/ufshcd.h | 13 +++++ 3 files changed, 165 insertions(+) diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index f76a304e7408..4ca99ed8ee4a 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -115,6 +115,7 @@ enum flag_idn { /* Attribute idn for Query requests */ enum attr_idn { + QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03, QUERY_ATTR_IDN_BKOPS_STATUS = 0x05, QUERY_ATTR_IDN_EE_CONTROL = 0x0D, QUERY_ATTR_IDN_EE_STATUS = 0x0E, @@ -174,6 +175,31 @@ enum unit_desc_param { UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22, }; +/* bActiveICCLevel parameter current units */ +enum { + UFSHCD_NANO_AMP = 0, + UFSHCD_MICRO_AMP = 1, + UFSHCD_MILI_AMP = 2, + UFSHCD_AMP = 3, +}; + +#define POWER_DESC_MAX_SIZE 0x62 +#define POWER_DESC_MAX_ACTV_ICC_LVLS 16 + +/* Attribute bActiveICCLevel parameter bit masks definitions */ +#define ATTR_ICC_LVL_UNIT_OFFSET 14 +#define ATTR_ICC_LVL_UNIT_MASK (0x3 << ATTR_ICC_LVL_UNIT_OFFSET) +#define ATTR_ICC_LVL_VALUE_MASK 0x3FF + +/* Power descriptor parameters offsets in bytes */ +enum power_desc_param_offset { + PWR_DESC_LEN = 0x0, + PWR_DESC_TYPE = 0x1, + PWR_DESC_ACTIVE_LVLS_VCC_0 = 0x2, + PWR_DESC_ACTIVE_LVLS_VCCQ_0 = 0x22, + PWR_DESC_ACTIVE_LVLS_VCCQ2_0 = 0x42, +}; + /* Exception event mask values */ enum { MASK_EE_STATUS = 0xFFFF, diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index af29d4c0a416..dc89c486d40a 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3264,6 +3264,125 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) return err; } +/** + * ufshcd_get_max_icc_level - calculate the ICC level + * @sup_curr_uA: max. current supported by the regulator + * @start_scan: row at the desc table to start scan from + * @buff: power descriptor buffer + * + * Returns calculated max ICC level for specific regulator + */ +static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff) +{ + int i; + int curr_uA; + u16 data; + u16 unit; + + for (i = start_scan; i >= 0; i--) { + data = be16_to_cpu(*((u16 *)(buff + 2*i))); + unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> + ATTR_ICC_LVL_UNIT_OFFSET; + curr_uA = data & ATTR_ICC_LVL_VALUE_MASK; + switch (unit) { + case UFSHCD_NANO_AMP: + curr_uA = curr_uA / 1000; + break; + case UFSHCD_MILI_AMP: + curr_uA = curr_uA * 1000; + break; + case UFSHCD_AMP: + curr_uA = curr_uA * 1000 * 1000; + break; + case UFSHCD_MICRO_AMP: + default: + break; + } + if (sup_curr_uA >= curr_uA) + break; + } + if (i < 0) { + i = 0; + pr_err("%s: Couldn't find valid icc_level = %d", __func__, i); + } + + return (u32)i; +} + +/** + * ufshcd_calc_icc_level - calculate the max ICC level + * In case regulators are not initialized we'll return 0 + * @hba: per-adapter instance + * @desc_buf: power descriptor buffer to extract ICC levels from. 
+ * @len: length of desc_buff + * + * Returns calculated ICC level + */ +static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, + u8 *desc_buf, int len) +{ + u32 icc_level = 0; + + if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || + !hba->vreg_info.vccq2) { + dev_err(hba->dev, + "%s: Regulator capability was not set, actvIccLevel=%d", + __func__, icc_level); + goto out; + } + + if (hba->vreg_info.vcc) + icc_level = ufshcd_get_max_icc_level( + hba->vreg_info.vcc->max_uA, + POWER_DESC_MAX_ACTV_ICC_LVLS - 1, + &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]); + + if (hba->vreg_info.vccq) + icc_level = ufshcd_get_max_icc_level( + hba->vreg_info.vccq->max_uA, + icc_level, + &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]); + + if (hba->vreg_info.vccq2) + icc_level = ufshcd_get_max_icc_level( + hba->vreg_info.vccq2->max_uA, + icc_level, + &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]); +out: + return icc_level; +} + +static void ufshcd_init_icc_levels(struct ufs_hba *hba) +{ + int ret; + int buff_len = QUERY_DESC_POWER_MAX_SIZE; + u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE]; + + ret = ufshcd_read_power_desc(hba, desc_buf, buff_len); + if (ret) { + dev_err(hba->dev, + "%s: Failed reading power descriptor.len = %d ret = %d", + __func__, buff_len, ret); + return; + } + + hba->init_prefetch_data.icc_level = + ufshcd_find_max_sup_active_icc_level(hba, + desc_buf, buff_len); + dev_dbg(hba->dev, "%s: setting icc_level 0x%x", + __func__, hba->init_prefetch_data.icc_level); + + ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, + &hba->init_prefetch_data.icc_level); + + if (ret) + dev_err(hba->dev, + "%s: Failed configuring bActiveICCLevel = %d ret = %d", + __func__, hba->init_prefetch_data.icc_level , ret); + +} + /** * ufshcd_probe_hba - probe hba to detect device and initialize * @hba: per-adapter instance @@ -3293,9 +3412,16 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) /* If we are in error handling context no need to scan the host */ if (!ufshcd_eh_in_progress(hba)) { + if (!hba->is_init_prefetch) + ufshcd_init_icc_levels(hba); + scsi_scan_host(hba->host); pm_runtime_put_sync(hba->dev); } + + if (!hba->is_init_prefetch) + hba->is_init_prefetch = true; + out: /* * If we failed to initialize the device or the device is not diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index eddb3f3b4139..8365ad437aa6 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -195,6 +195,15 @@ struct ufs_hba_variant_ops { int (*link_startup_notify)(struct ufs_hba *, bool); }; +/** + * struct ufs_init_prefetch - contains data that is pre-fetched once during + * initialization + * @icc_level: icc level which was read during initialization + */ +struct ufs_init_prefetch { + u32 icc_level; +}; + /** * struct ufs_hba - per adapter private structure * @mmio_base: UFSHCI base register address @@ -229,6 +238,8 @@ struct ufs_hba_variant_ops { * @intr_mask: Interrupt Mask Bits * @ee_ctrl_mask: Exception event control mask * @is_powered: flag to check if HBA is powered + * @is_init_prefetch: flag to check if data was pre-fetched in initialization + * @init_prefetch_data: data pre-fetched during initialization * @eh_work: Worker to handle UFS errors that require s/w attention * @eeh_work: Worker to handle exception events * @errors: HBA errors @@ -285,6 +296,8 @@ struct ufs_hba { u32 intr_mask; u16 ee_ctrl_mask; bool is_powered; + bool is_init_prefetch; + struct ufs_init_prefetch init_prefetch_data; /* Work Queues */ struct work_struct eh_work; -- cgit From 
2a8fa600445c45222632810a4811ce820279d106 Mon Sep 17 00:00:00 2001 From: Subhash Jadavani Date: Thu, 25 Sep 2014 15:32:28 +0300 Subject: ufs: manually add well known logical units UFS device specification requires the UFS devices to support 4 well known logical units: "REPORT_LUNS" (address: 01h) "UFS Device" (address: 50h) "RPMB" (address: 44h) "BOOT" (address: 30h) UFS device's power management needs to be controlled by "POWER CONDITION" field of SSU (START STOP UNIT) command. But this "power condition" field will take effect only when its sent to "UFS device" well known logical unit hence we require the scsi_device instance to represent this logical unit in order for the UFS host driver to send the SSU command for power management. We also require the scsi_device instance for "RPMB" (Replay Protected Memory Block) LU so user space process can control this LU. User space may also want to have access to BOOT LU. This patch adds the scsi device instances for each of all well known LUs (except "REPORT LUNS" LU). Signed-off-by: Subhash Jadavani Signed-off-by: Dolev Raviv Signed-off-by: Christoph Hellwig --- drivers/scsi/ufs/ufshcd.c | 103 ++++++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/ufs/ufshcd.h | 7 ++++ 2 files changed, 110 insertions(+) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index dc89c486d40a..692fd7aee142 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -901,6 +901,17 @@ static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) return ret; } +/** + * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID + * @scsi_lun: UPIU W-LUN id + * + * Returns SCSI W-LUN id + */ +static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id) +{ + return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE; +} + /** * ufshcd_queuecommand - main entry point for SCSI requests * @cmd: command from SCSI Midlayer @@ -3383,6 +3394,93 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba) } +/** + * ufshcd_scsi_add_wlus - Adds required W-LUs + * @hba: per-adapter instance + * + * UFS device specification requires the UFS devices to support 4 well known + * logical units: + * "REPORT_LUNS" (address: 01h) + * "UFS Device" (address: 50h) + * "RPMB" (address: 44h) + * "BOOT" (address: 30h) + * UFS device's power management needs to be controlled by "POWER CONDITION" + * field of SSU (START STOP UNIT) command. But this "power condition" field + * will take effect only when its sent to "UFS device" well known logical unit + * hence we require the scsi_device instance to represent this logical unit in + * order for the UFS host driver to send the SSU command for power management. + + * We also require the scsi_device instance for "RPMB" (Replay Protected Memory + * Block) LU so user space process can control this LU. User space may also + * want to have access to BOOT LU. + + * This function adds scsi device instances for each of all well known LUs + * (except "REPORT LUNS" LU). + * + * Returns zero on success (all required W-LUs are added successfully), + * non-zero error value on failure (if failed to add any of the required W-LU). 
+ */ +static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) +{ + int ret = 0; + + hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, + ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); + if (IS_ERR(hba->sdev_ufs_device)) { + ret = PTR_ERR(hba->sdev_ufs_device); + hba->sdev_ufs_device = NULL; + goto out; + } + + hba->sdev_boot = __scsi_add_device(hba->host, 0, 0, + ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); + if (IS_ERR(hba->sdev_boot)) { + ret = PTR_ERR(hba->sdev_boot); + hba->sdev_boot = NULL; + goto remove_sdev_ufs_device; + } + + hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0, + ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); + if (IS_ERR(hba->sdev_rpmb)) { + ret = PTR_ERR(hba->sdev_rpmb); + hba->sdev_rpmb = NULL; + goto remove_sdev_boot; + } + goto out; + +remove_sdev_boot: + scsi_remove_device(hba->sdev_boot); +remove_sdev_ufs_device: + scsi_remove_device(hba->sdev_ufs_device); +out: + return ret; +} + +/** + * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by + * ufshcd_scsi_add_wlus() + * @hba: per-adapter instance + * + */ +static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba) +{ + if (hba->sdev_ufs_device) { + scsi_remove_device(hba->sdev_ufs_device); + hba->sdev_ufs_device = NULL; + } + + if (hba->sdev_boot) { + scsi_remove_device(hba->sdev_boot); + hba->sdev_boot = NULL; + } + + if (hba->sdev_rpmb) { + scsi_remove_device(hba->sdev_rpmb); + hba->sdev_rpmb = NULL; + } +} + /** * ufshcd_probe_hba - probe hba to detect device and initialize * @hba: per-adapter instance @@ -3415,6 +3513,10 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (!hba->is_init_prefetch) ufshcd_init_icc_levels(hba); + /* Add required well known logical units to scsi mid layer */ + if (ufshcd_scsi_add_wlus(hba)) + goto out; + scsi_scan_host(hba->host); pm_runtime_put_sync(hba->dev); } @@ -3901,6 +4003,7 @@ EXPORT_SYMBOL(ufshcd_runtime_idle); void ufshcd_remove(struct ufs_hba *hba) { scsi_remove_host(hba->host); + ufshcd_scsi_remove_wlus(hba); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); ufshcd_hba_stop(hba); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 8365ad437aa6..25065ea565e2 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -266,6 +266,13 @@ struct ufs_hba { struct Scsi_Host *host; struct device *dev; + /* + * This field is to keep a reference to "scsi_device" corresponding to + * "UFS device" W-LU. + */ + struct scsi_device *sdev_ufs_device; + struct scsi_device *sdev_rpmb; + struct scsi_device *sdev_boot; struct ufshcd_lrb *lrb; unsigned long lrb_in_use; -- cgit From 0ce147d48a3e3352859f0c185e98e8392bee7a25 Mon Sep 17 00:00:00 2001 From: Subhash Jadavani Date: Thu, 25 Sep 2014 15:32:29 +0300 Subject: ufs: introduce well known logical unit in ufs UFS device may have standard LUs and LUN id could be from 0x00 to 0x7F. UFS device specification use "Peripheral Device Addressing Format" (SCSI SAM-5) for standard LUs. UFS device may also have the Well Known LUs (also referred as W-LU) which again could be from 0x00 to 0x7F. For W-LUs, UFS device specification only allows the "Extended Addressing Format" (SCSI SAM-5) which means the W-LUNs would start from 0xC100 onwards. This means max. LUN number reported from UFS device could be 0xC17F hence this patch advertise the "max_lun" as 0xC17F which will allow SCSI mid layer to detect the W-LUs as well. 
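[ Side note, illustration only: the arithmetic behind the 0xC17F limit. The macro names in this standalone snippet are assumptions mirroring SCSI_W_LUN_BASE and the 0x7F unit-number ceiling used by this patch; they are not the in-tree definitions. ]

#include <stdio.h>

#define SCSI_WLUN_BASE		0xC100	/* "Extended Addressing" W-LUN base */
#define UFS_MAX_UNIT_NUM_ID	0x7F	/* highest unit number per format   */

int main(void)
{
	unsigned int max_lun = SCSI_WLUN_BASE + UFS_MAX_UNIT_NUM_ID;

	/* 0xC17F: advertised so the SCSI midlayer probes W-LUs too */
	printf("max_lun = 0x%X\n", max_lun);
	return 0;
}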
But once the W-LUs are detected, UFSHCD driver may get the commands with SCSI LUN id upto 0xC17F but UPIU LUN id field is only 8-bit wide so it requires the mapping of SCSI LUN id to UPIU LUN id. This patch also add support for this mapping. Signed-off-by: Subhash Jadavani Signed-off-by: Dolev Raviv Signed-off-by: Sujit Reddy Thumma Signed-off-by: Christoph Hellwig --- drivers/scsi/ufs/ufs.h | 21 +++++++++++- drivers/scsi/ufs/ufshcd.c | 84 ++++++++++++++++++++++++++++++++++++----------- drivers/scsi/ufs/ufshcd.h | 2 +- 3 files changed, 85 insertions(+), 22 deletions(-) diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 4ca99ed8ee4a..37d64c1fb8da 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -49,9 +49,28 @@ #define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\ cpu_to_be32((byte3 << 24) | (byte2 << 16) |\ (byte1 << 8) | (byte0)) - +/* + * UFS device may have standard LUs and LUN id could be from 0x00 to + * 0x7F. Standard LUs use "Peripheral Device Addressing Format". + * UFS device may also have the Well Known LUs (also referred as W-LU) + * which again could be from 0x00 to 0x7F. For W-LUs, device only use + * the "Extended Addressing Format" which means the W-LUNs would be + * from 0xc100 (SCSI_W_LUN_BASE) onwards. + * This means max. LUN number reported from UFS device could be 0xC17F. + */ +#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F +#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID) +#define UFS_UPIU_WLUN_ID (1 << 7) #define UFS_UPIU_MAX_GENERAL_LUN 8 +/* Well known logical unit id in LUN field of UPIU */ +enum { + UFS_UPIU_REPORT_LUNS_WLUN = 0x81, + UFS_UPIU_UFS_DEVICE_WLUN = 0xD0, + UFS_UPIU_BOOT_WLUN = 0xB0, + UFS_UPIU_RPMB_WLUN = 0xC4, +}; + /* * UFS Protocol Information Unit related definitions */ diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 692fd7aee142..a1eae495fa67 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -100,7 +100,6 @@ static u32 ufs_query_desc_max_size[] = { enum { UFSHCD_MAX_CHANNEL = 0, UFSHCD_MAX_ID = 1, - UFSHCD_MAX_LUNS = 8, UFSHCD_CMD_PER_LUN = 32, UFSHCD_CAN_QUEUE = 32, }; @@ -901,6 +900,21 @@ static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) return ret; } +/* + * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN + * @scsi_lun: scsi LUN id + * + * Returns UPIU LUN id + */ +static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun) +{ + if (scsi_is_wlun(scsi_lun)) + return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID) + | UFS_UPIU_WLUN_ID; + else + return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID; +} + /** * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID * @scsi_lun: UPIU W-LUN id @@ -970,7 +984,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE; lrbp->sense_buffer = cmd->sense_buffer; lrbp->task_tag = tag; - lrbp->lun = cmd->device->lun; + lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); lrbp->intr_cmd = false; lrbp->command_type = UTP_CMD_TYPE_SCSI; @@ -1524,7 +1538,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, * Unit descriptors are only available for general purpose LUs (LUN id * from 0 to 7) and RPMB Well known LU. 
*/ - if (lun >= UFS_UPIU_MAX_GENERAL_LUN) + if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN)) return -EOPNOTSUPP; return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, @@ -2154,6 +2168,44 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba) return err; } +/** + * ufshcd_set_queue_depth - set lun queue depth + * @sdev: pointer to SCSI device + * + * Read bLUQueueDepth value and activate scsi tagged command + * queueing. For WLUN, queue depth is set to 1. For best-effort + * cases (bLUQueueDepth = 0) the queue depth is set to a maximum + * value that host can queue. + */ +static void ufshcd_set_queue_depth(struct scsi_device *sdev) +{ + int ret = 0; + u8 lun_qdepth; + struct ufs_hba *hba; + + hba = shost_priv(sdev->host); + + lun_qdepth = hba->nutrs; + ret = ufshcd_read_unit_desc_param(hba, + ufshcd_scsi_to_upiu_lun(sdev->lun), + UNIT_DESC_PARAM_LU_Q_DEPTH, + &lun_qdepth, + sizeof(lun_qdepth)); + + /* Some WLUN doesn't support unit descriptor */ + if (ret == -EOPNOTSUPP) + lun_qdepth = 1; + else if (!lun_qdepth) + /* eventually, we can figure out the real queue depth */ + lun_qdepth = hba->nutrs; + else + lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); + + dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", + __func__, lun_qdepth); + scsi_activate_tcq(sdev, lun_qdepth); +} + /** * ufshcd_slave_alloc - handle initial SCSI device configurations * @sdev: pointer to SCSI device @@ -2163,8 +2215,6 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba) static int ufshcd_slave_alloc(struct scsi_device *sdev) { struct ufs_hba *hba; - u8 lun_qdepth; - int ret; hba = shost_priv(sdev->host); sdev->tagged_supported = 1; @@ -2179,20 +2229,8 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) /* REPORT SUPPORTED OPERATION CODES is not supported */ sdev->no_report_opcodes = 1; - ret = ufshcd_read_unit_desc_param(hba, - sdev->lun, - UNIT_DESC_PARAM_LU_Q_DEPTH, - &lun_qdepth, - sizeof(lun_qdepth)); - if (!ret || !lun_qdepth) - /* eventually, we can figure out the real queue depth */ - lun_qdepth = hba->nutrs; - else - lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); - dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", - __func__, lun_qdepth); - scsi_activate_tcq(sdev, lun_qdepth); + ufshcd_set_queue_depth(sdev); return 0; } @@ -2255,6 +2293,9 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev) hba = shost_priv(sdev->host); scsi_deactivate_tcq(sdev, hba->nutrs); + /* Drop the reference as it won't be needed anymore */ + if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) + hba->sdev_ufs_device = NULL; } /** @@ -2972,7 +3013,10 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, lun_id, task_tag); task_req_upiup->header.dword_1 = UPIU_HEADER_DWORD(0, tm_function, 0, 0); - + /* + * The host shall provide the same value for LUN field in the basic + * header and for Input Parameter. 
+ */ task_req_upiup->input_param1 = cpu_to_be32(lun_id); task_req_upiup->input_param2 = cpu_to_be32(task_id); @@ -4121,7 +4165,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) host->can_queue = hba->nutrs; host->cmd_per_lun = hba->nutrs; host->max_id = UFSHCD_MAX_ID; - host->max_lun = UFSHCD_MAX_LUNS; + host->max_lun = UFS_MAX_LUNS; host->max_channel = UFSHCD_MAX_CHANNEL; host->unique_id = host->host_no; host->max_cmd_len = MAX_CDB_SIZE; diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 25065ea565e2..5c25337bfcc8 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -124,7 +124,7 @@ struct ufshcd_lrb { int command_type; int task_tag; - unsigned int lun; + u8 lun; /* UPIU LUN id field is only 8-bit wide */ bool intr_cmd; }; -- cgit From 57d104c153d3d6d7bea60089e80f37501851ed2c Mon Sep 17 00:00:00 2001 From: Subhash Jadavani Date: Thu, 25 Sep 2014 15:32:30 +0300 Subject: ufs: add UFS power management support This patch adds support for UFS device and UniPro link power management during runtime/system PM. Main idea is to define multiple UFS low power levels based on UFS device and UFS link power states. This would allow any specific platform or pci driver to choose the best suited low power level during runtime and system suspend based on their power goals. bkops handlig: To put the UFS device in sleep state when bkops is disabled, first query the bkops status from the device and enable bkops on device only if device needs time to perform the bkops. START_STOP handling: Before sending START_STOP_UNIT to the device well-known logical unit (w-lun) to make sure that the device w-lun unit attention condition is cleared. Write protection: UFS device specification allows LUs to be write protected, either permanently or power on write protected. If any LU is power on write protected and if the card is power cycled (by powering off VCCQ and/or VCC rails), LU's write protect status would be lost. So this means those LUs can be written now. To ensures that UFS device is power cycled only if the power on protect is not set for any of the LUs, check if power on write protect is set and if device is in sleep/power-off state & link in inactive state (Hibern8 or OFF state). If none of the Logical Units on UFS device is power on write protected then all UFS device power rails (VCC, VCCQ & VCCQ2) can be turned off if UFS device is in power-off state and UFS link is in OFF state. But current implementation would disable all device power rails even if UFS link is not in OFF state. Low power mode: If UFS link is in OFF state then UFS host controller can be power collapsed to avoid leakage current from it. Note that if UFS host controller is power collapsed, full UFS reinitialization will be required on resume to re-establish the link between host and device. 
Signed-off-by: Subhash Jadavani Signed-off-by: Dolev Raviv Signed-off-by: Sujit Reddy Thumma Signed-off-by: Christoph Hellwig --- drivers/scsi/ufs/ufs.h | 36 +- drivers/scsi/ufs/ufshcd-pci.c | 45 +- drivers/scsi/ufs/ufshcd-pltfrm.c | 60 +-- drivers/scsi/ufs/ufshcd.c | 945 +++++++++++++++++++++++++++++++++++---- drivers/scsi/ufs/ufshcd.h | 76 +++- drivers/scsi/ufs/ufshci.h | 9 +- 6 files changed, 989 insertions(+), 182 deletions(-) diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 37d64c1fb8da..42c459a9d3fe 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -129,6 +129,7 @@ enum { /* Flag idn for Query Requests*/ enum flag_idn { QUERY_FLAG_IDN_FDEVICEINIT = 0x01, + QUERY_FLAG_IDN_PWR_ON_WPE = 0x03, QUERY_FLAG_IDN_BKOPS_EN = 0x04, }; @@ -194,6 +195,18 @@ enum unit_desc_param { UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22, }; +/* + * Logical Unit Write Protect + * 00h: LU not write protected + * 01h: LU write protected when fPowerOnWPEn =1 + * 02h: LU permanently write protected when fPermanentWPEn =1 + */ +enum ufs_lu_wp_type { + UFS_LU_NO_WP = 0x00, + UFS_LU_POWER_ON_WP = 0x01, + UFS_LU_PERM_WP = 0x02, +}; + /* bActiveICCLevel parameter current units */ enum { UFSHCD_NANO_AMP = 0, @@ -226,11 +239,12 @@ enum { }; /* Background operation status */ -enum { +enum bkops_status { BKOPS_STATUS_NO_OP = 0x0, BKOPS_STATUS_NON_CRITICAL = 0x1, BKOPS_STATUS_PERF_IMPACT = 0x2, BKOPS_STATUS_CRITICAL = 0x3, + BKOPS_STATUS_MAX = BKOPS_STATUS_CRITICAL, }; /* UTP QUERY Transaction Specific Fields OpCode */ @@ -291,6 +305,14 @@ enum { UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05, UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09, }; + +/* UFS device power modes */ +enum ufs_dev_pwr_mode { + UFS_ACTIVE_PWR_MODE = 1, + UFS_SLEEP_PWR_MODE = 2, + UFS_POWERDOWN_PWR_MODE = 3, +}; + /** * struct utp_upiu_header - UPIU header structure * @dword_0: UPIU header DW-0 @@ -437,6 +459,12 @@ struct ufs_query_res { #define UFS_VREG_VCCQ2_MIN_UV 1650000 /* uV */ #define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */ +/* + * VCCQ & VCCQ2 current requirement when UFS device is in sleep state + * and link is in Hibern8 state. + */ +#define UFS_VREG_LPM_LOAD_UA 1000 /* uA */ + struct ufs_vreg { struct regulator *reg; const char *name; @@ -454,4 +482,10 @@ struct ufs_vreg_info { struct ufs_vreg *vdd_hba; }; +struct ufs_dev_info { + bool f_power_on_wp_en; + /* Keeps information if any of the LU is power on write protected */ + bool is_lu_power_on_wp; +}; + #endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index 2a26faa95b77..955ed5587011 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -43,34 +43,24 @@ * @pdev: pointer to PCI device handle * @state: power state * - * Returns -ENOSYS + * Returns 0 if successful + * Returns non-zero otherwise */ static int ufshcd_pci_suspend(struct device *dev) { - /* - * TODO: - * 1. Call ufshcd_suspend - * 2. Do bus specific power management - */ - - return -ENOSYS; + return ufshcd_system_suspend(dev_get_drvdata(dev)); } /** * ufshcd_pci_resume - resume power management function * @pdev: pointer to PCI device handle * - * Returns -ENOSYS + * Returns 0 if successful + * Returns non-zero otherwise */ static int ufshcd_pci_resume(struct device *dev) { - /* - * TODO: - * 1. Call ufshcd_resume. - * 2. 
Do bus specific wake up - */ - - return -ENOSYS; + return ufshcd_system_resume(dev_get_drvdata(dev)); } #else #define ufshcd_pci_suspend NULL @@ -80,30 +70,15 @@ static int ufshcd_pci_resume(struct device *dev) #ifdef CONFIG_PM_RUNTIME static int ufshcd_pci_runtime_suspend(struct device *dev) { - struct ufs_hba *hba = dev_get_drvdata(dev); - - if (!hba) - return 0; - - return ufshcd_runtime_suspend(hba); + return ufshcd_runtime_suspend(dev_get_drvdata(dev)); } static int ufshcd_pci_runtime_resume(struct device *dev) { - struct ufs_hba *hba = dev_get_drvdata(dev); - - if (!hba) - return 0; - - return ufshcd_runtime_resume(hba); + return ufshcd_runtime_resume(dev_get_drvdata(dev)); } static int ufshcd_pci_runtime_idle(struct device *dev) { - struct ufs_hba *hba = dev_get_drvdata(dev); - - if (!hba) - return 0; - - return ufshcd_runtime_idle(hba); + return ufshcd_runtime_idle(dev_get_drvdata(dev)); } #else /* !CONFIG_PM_RUNTIME */ #define ufshcd_pci_runtime_suspend NULL @@ -117,7 +92,7 @@ static int ufshcd_pci_runtime_idle(struct device *dev) */ static void ufshcd_pci_shutdown(struct pci_dev *pdev) { - ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev)); + ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev)); } /** diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index dde4e6e3be70..2482bbac3681 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -225,45 +225,24 @@ out: * ufshcd_pltfrm_suspend - suspend power management function * @dev: pointer to device handle * - * - * Returns 0 + * Returns 0 if successful + * Returns non-zero otherwise */ static int ufshcd_pltfrm_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct ufs_hba *hba = platform_get_drvdata(pdev); - - /* - * TODO: - * 1. Call ufshcd_suspend - * 2. Do bus specific power management - */ - - disable_irq(hba->irq); - - return 0; + return ufshcd_system_suspend(dev_get_drvdata(dev)); } /** * ufshcd_pltfrm_resume - resume power management function * @dev: pointer to device handle * - * Returns 0 + * Returns 0 if successful + * Returns non-zero otherwise */ static int ufshcd_pltfrm_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct ufs_hba *hba = platform_get_drvdata(pdev); - - /* - * TODO: - * 1. Call ufshcd_resume. - * 2. 
Do bus specific wake up - */ - - enable_irq(hba->irq); - - return 0; + return ufshcd_system_resume(dev_get_drvdata(dev)); } #else #define ufshcd_pltfrm_suspend NULL @@ -273,30 +252,15 @@ static int ufshcd_pltfrm_resume(struct device *dev) #ifdef CONFIG_PM_RUNTIME static int ufshcd_pltfrm_runtime_suspend(struct device *dev) { - struct ufs_hba *hba = dev_get_drvdata(dev); - - if (!hba) - return 0; - - return ufshcd_runtime_suspend(hba); + return ufshcd_runtime_suspend(dev_get_drvdata(dev)); } static int ufshcd_pltfrm_runtime_resume(struct device *dev) { - struct ufs_hba *hba = dev_get_drvdata(dev); - - if (!hba) - return 0; - - return ufshcd_runtime_resume(hba); + return ufshcd_runtime_resume(dev_get_drvdata(dev)); } static int ufshcd_pltfrm_runtime_idle(struct device *dev) { - struct ufs_hba *hba = dev_get_drvdata(dev); - - if (!hba) - return 0; - - return ufshcd_runtime_idle(hba); + return ufshcd_runtime_idle(dev_get_drvdata(dev)); } #else /* !CONFIG_PM_RUNTIME */ #define ufshcd_pltfrm_runtime_suspend NULL @@ -304,6 +268,11 @@ static int ufshcd_pltfrm_runtime_idle(struct device *dev) #define ufshcd_pltfrm_runtime_idle NULL #endif /* CONFIG_PM_RUNTIME */ +static void ufshcd_pltfrm_shutdown(struct platform_device *pdev) +{ + ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev)); +} + /** * ufshcd_pltfrm_probe - probe routine of the driver * @pdev: pointer to Platform device handle @@ -404,6 +373,7 @@ static const struct dev_pm_ops ufshcd_dev_pm_ops = { static struct platform_driver ufshcd_pltfrm_driver = { .probe = ufshcd_pltfrm_probe, .remove = ufshcd_pltfrm_remove, + .shutdown = ufshcd_pltfrm_shutdown, .driver = { .name = "ufshcd", .owner = THIS_MODULE, diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index a1eae495fa67..f2b50bc13ac9 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -44,7 +44,6 @@ #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ UTP_TASK_REQ_COMPL |\ - UIC_POWER_MODE |\ UFSHCD_ERROR_MASK) /* UIC command timeout, unit: ms */ #define UIC_CMD_TIMEOUT 500 @@ -138,12 +137,72 @@ enum { #define ufshcd_clear_eh_in_progress(h) \ (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS) +#define ufshcd_set_ufs_dev_active(h) \ + ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE) +#define ufshcd_set_ufs_dev_sleep(h) \ + ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE) +#define ufshcd_set_ufs_dev_poweroff(h) \ + ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE) +#define ufshcd_is_ufs_dev_active(h) \ + ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE) +#define ufshcd_is_ufs_dev_sleep(h) \ + ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE) +#define ufshcd_is_ufs_dev_poweroff(h) \ + ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE) + +static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = { + {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE}, + {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE}, + {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE}, + {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE}, + {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE}, + {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE}, +}; + +static inline enum ufs_dev_pwr_mode +ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl) +{ + return ufs_pm_lvl_states[lvl].dev_state; +} + +static inline enum uic_link_state +ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl) +{ + return ufs_pm_lvl_states[lvl].link_state; +} + static void ufshcd_tmc_handler(struct ufs_hba *hba); static void ufshcd_async_scan(void *data, async_cookie_t cookie); static int ufshcd_reset_and_restore(struct ufs_hba *hba); static int 
ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); static void ufshcd_hba_exit(struct ufs_hba *hba); static int ufshcd_probe_hba(struct ufs_hba *hba); +static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); +static irqreturn_t ufshcd_intr(int irq, void *__hba); + +static inline int ufshcd_enable_irq(struct ufs_hba *hba) +{ + int ret = 0; + + if (!hba->is_irq_enabled) { + ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, + hba); + if (ret) + dev_err(hba->dev, "%s: request_irq failed, ret=%d\n", + __func__, ret); + hba->is_irq_enabled = true; + } + + return ret; +} + +static inline void ufshcd_disable_irq(struct ufs_hba *hba) +{ + if (hba->is_irq_enabled) { + free_irq(hba->irq, hba); + hba->is_irq_enabled = false; + } +} /* * ufshcd_wait_for_register - wait for register value to change @@ -609,15 +668,12 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) * @uic_cmd: UIC command * * Identical to ufshcd_send_uic_cmd() expect mutex. Must be called - * with mutex held. + * with mutex held and host_lock locked. * Returns 0 only if success. */ static int __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) { - int ret; - unsigned long flags; - if (!ufshcd_ready_for_uic_cmd(hba)) { dev_err(hba->dev, "Controller not ready to accept UIC commands\n"); @@ -626,13 +682,9 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) init_completion(&uic_cmd->done); - spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_dispatch_uic_cmd(hba, uic_cmd); - spin_unlock_irqrestore(hba->host->host_lock, flags); - ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); - - return ret; + return 0; } /** @@ -646,9 +698,15 @@ static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) { int ret; + unsigned long flags; mutex_lock(&hba->uic_cmd_mutex); + spin_lock_irqsave(hba->host->host_lock, flags); ret = __ufshcd_send_uic_cmd(hba, uic_cmd); + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (!ret) + ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); + mutex_unlock(&hba->uic_cmd_mutex); return ret; @@ -1789,44 +1847,54 @@ out: EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); /** - * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage - * using DME_SET primitives. + * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power + * state) and waits for it to take effect. + * * @hba: per adapter instance - * @mode: powr mode value + * @cmd: UIC command to execute + * + * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER & + * DME_HIBERNATE_EXIT commands take some time to take its effect on both host + * and device UniPro link and hence it's final completion would be indicated by + * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in + * addition to normal UIC command completion Status (UCCS). This function only + * returns after the relevant status bits indicate the completion. 
* * Returns 0 on success, non-zero value on failure */ -static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) +static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) { - struct uic_command uic_cmd = {0}; - struct completion pwr_done; + struct completion uic_async_done; unsigned long flags; u8 status; int ret; - uic_cmd.command = UIC_CMD_DME_SET; - uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); - uic_cmd.argument3 = mode; - init_completion(&pwr_done); - mutex_lock(&hba->uic_cmd_mutex); + init_completion(&uic_async_done); spin_lock_irqsave(hba->host->host_lock, flags); - hba->pwr_done = &pwr_done; + hba->uic_async_done = &uic_async_done; + ret = __ufshcd_send_uic_cmd(hba, cmd); spin_unlock_irqrestore(hba->host->host_lock, flags); - ret = __ufshcd_send_uic_cmd(hba, &uic_cmd); if (ret) { dev_err(hba->dev, - "pwr mode change with mode 0x%x uic error %d\n", - mode, ret); + "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", + cmd->command, cmd->argument3, ret); + goto out; + } + ret = ufshcd_wait_for_uic_cmd(hba, cmd); + if (ret) { + dev_err(hba->dev, + "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", + cmd->command, cmd->argument3, ret); goto out; } - if (!wait_for_completion_timeout(hba->pwr_done, + if (!wait_for_completion_timeout(hba->uic_async_done, msecs_to_jiffies(UIC_CMD_TIMEOUT))) { dev_err(hba->dev, - "pwr mode change with mode 0x%x completion timeout\n", - mode); + "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n", + cmd->command, cmd->argument3); ret = -ETIMEDOUT; goto out; } @@ -1834,18 +1902,61 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) status = ufshcd_get_upmcrs(hba); if (status != PWR_LOCAL) { dev_err(hba->dev, - "pwr mode change failed, host umpcrs:0x%x\n", - status); + "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n", + cmd->command, status); ret = (status != PWR_OK) ? status : -1; } out: spin_lock_irqsave(hba->host->host_lock, flags); - hba->pwr_done = NULL; + hba->uic_async_done = NULL; spin_unlock_irqrestore(hba->host->host_lock, flags); mutex_unlock(&hba->uic_cmd_mutex); return ret; } +/** + * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage + * using DME_SET primitives. + * @hba: per adapter instance + * @mode: powr mode value + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) +{ + struct uic_command uic_cmd = {0}; + + uic_cmd.command = UIC_CMD_DME_SET; + uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); + uic_cmd.argument3 = mode; + + return ufshcd_uic_pwr_ctrl(hba, &uic_cmd); +} + +static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + + uic_cmd.command = UIC_CMD_DME_HIBER_ENTER; + + return ufshcd_uic_pwr_ctrl(hba, &uic_cmd); +} + +static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_HIBER_EXIT; + ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); + if (ret) { + ufshcd_set_link_off(hba); + ret = ufshcd_host_reset_and_restore(hba); + } + + return ret; +} + /** * ufshcd_config_max_pwr_mode - Set & Change power mode with * maximum capability attribute information. 
@@ -2045,6 +2156,9 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) msleep(5); } + /* UniPro link is disabled at this point */ + ufshcd_set_link_off(hba); + if (hba->vops && hba->vops->hce_enable_notify) hba->vops->hce_enable_notify(hba, PRE_CHANGE); @@ -2077,7 +2191,7 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) } /* enable UIC related interrupts */ - ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); + ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); if (hba->vops && hba->vops->hce_enable_notify) hba->vops->hce_enable_notify(hba, POST_CHANGE); @@ -2206,6 +2320,62 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev) scsi_activate_tcq(sdev, lun_qdepth); } +/* + * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR + * @hba: per-adapter instance + * @lun: UFS device lun id + * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info + * + * Returns 0 in case of success and b_lu_write_protect status would be returned + * @b_lu_write_protect parameter. + * Returns -ENOTSUPP if reading b_lu_write_protect is not supported. + * Returns -EINVAL in case of invalid parameters passed to this function. + */ +static int ufshcd_get_lu_wp(struct ufs_hba *hba, + u8 lun, + u8 *b_lu_write_protect) +{ + int ret; + + if (!b_lu_write_protect) + ret = -EINVAL; + /* + * According to UFS device spec, RPMB LU can't be write + * protected so skip reading bLUWriteProtect parameter for + * it. For other W-LUs, UNIT DESCRIPTOR is not available. + */ + else if (lun >= UFS_UPIU_MAX_GENERAL_LUN) + ret = -ENOTSUPP; + else + ret = ufshcd_read_unit_desc_param(hba, + lun, + UNIT_DESC_PARAM_LU_WR_PROTECT, + b_lu_write_protect, + sizeof(*b_lu_write_protect)); + return ret; +} + +/** + * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect + * status + * @hba: per-adapter instance + * @sdev: pointer to SCSI device + * + */ +static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba, + struct scsi_device *sdev) +{ + if (hba->dev_info.f_power_on_wp_en && + !hba->dev_info.is_lu_power_on_wp) { + u8 b_lu_write_protect; + + if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun), + &b_lu_write_protect) && + (b_lu_write_protect == UFS_LU_POWER_ON_WP)) + hba->dev_info.is_lu_power_on_wp = true; + } +} + /** * ufshcd_slave_alloc - handle initial SCSI device configurations * @sdev: pointer to SCSI device @@ -2232,6 +2402,8 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) ufshcd_set_queue_depth(sdev); + ufshcd_get_lu_power_on_wp_status(hba, sdev); + return 0; } @@ -2462,8 +2634,8 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) complete(&hba->active_uic_cmd->done); } - if ((intr_status & UIC_POWER_MODE) && hba->pwr_done) - complete(hba->pwr_done); + if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) + complete(hba->uic_async_done); } /** @@ -2675,33 +2847,62 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) } /** - * ufshcd_urgent_bkops - handle urgent bkops exception event + * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status * @hba: per-adapter instance + * @status: bkops_status value * - * Enable fBackgroundOpsEn flag in the device to permit background - * operations. + * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn + * flag in the device to permit background operations if the device + * bkops_status is greater than or equal to "status" argument passed to + * this function, disable otherwise. 
+ * + * Returns 0 for success, non-zero in case of failure. + * + * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag + * to know whether auto bkops is enabled or disabled after this function + * returns control to it. */ -static int ufshcd_urgent_bkops(struct ufs_hba *hba) +static int ufshcd_bkops_ctrl(struct ufs_hba *hba, + enum bkops_status status) { int err; - u32 status = 0; + u32 curr_status = 0; - err = ufshcd_get_bkops_status(hba, &status); + err = ufshcd_get_bkops_status(hba, &curr_status); if (err) { dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", __func__, err); goto out; + } else if (curr_status > BKOPS_STATUS_MAX) { + dev_err(hba->dev, "%s: invalid BKOPS status %d\n", + __func__, curr_status); + err = -EINVAL; + goto out; } - status = status & 0xF; - - /* handle only if status indicates performance impact or critical */ - if (status >= BKOPS_STATUS_PERF_IMPACT) + if (curr_status >= status) err = ufshcd_enable_auto_bkops(hba); + else + err = ufshcd_disable_auto_bkops(hba); out: return err; } +/** + * ufshcd_urgent_bkops - handle urgent bkops exception event + * @hba: per-adapter instance + * + * Enable fBackgroundOpsEn flag in the device to permit background + * operations. + * + * If BKOPs is enabled, this function returns 0, 1 if the bkops in not enabled + * and negative error value for any other failure. + */ +static int ufshcd_urgent_bkops(struct ufs_hba *hba) +{ + return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT); +} + static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) { return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, @@ -2733,7 +2934,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work) status &= hba->ee_ctrl_mask; if (status & MASK_EE_URGENT_BKOPS) { err = ufshcd_urgent_bkops(hba); - if (err) + if (err < 0) dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", __func__, err); } @@ -3539,7 +3740,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (ret) goto out; - ufshcd_config_max_pwr_mode(hba); + /* UniPro link is active now */ + ufshcd_set_link_active(hba); ret = ufshcd_verify_dev_init(hba); if (ret) @@ -3549,11 +3751,27 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (ret) goto out; + /* UFS device is also active now */ + ufshcd_set_ufs_dev_active(hba); ufshcd_force_reset_auto_bkops(hba); hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + hba->wlun_dev_clr_ua = true; + + ufshcd_config_max_pwr_mode(hba); + + /* + * If we are in error handling context or in power management callbacks + * context, no need to scan the host + */ + if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { + bool flag; + + /* clear any previous UFS device information */ + memset(&hba->dev_info, 0, sizeof(hba->dev_info)); + if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) + hba->dev_info.f_power_on_wp_en = flag; - /* If we are in error handling context no need to scan the host */ - if (!ufshcd_eh_in_progress(hba)) { if (!hba->is_init_prefetch) ufshcd_init_icc_levels(hba); @@ -3573,8 +3791,10 @@ out: * If we failed to initialize the device or the device is not * present, turn off the power/clocks etc. 
*/ - if (ret && !ufshcd_eh_in_progress(hba)) + if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { + pm_runtime_put_sync(hba->dev); ufshcd_hba_exit(hba); + } return ret; } @@ -3609,6 +3829,42 @@ static struct scsi_host_template ufshcd_driver_template = { .can_queue = UFSHCD_CAN_QUEUE, }; +static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, + int ua) +{ + int ret = 0; + struct regulator *reg = vreg->reg; + const char *name = vreg->name; + + BUG_ON(!vreg); + + ret = regulator_set_optimum_mode(reg, ua); + if (ret >= 0) { + /* + * regulator_set_optimum_mode() returns new regulator + * mode upon success. + */ + ret = 0; + } else { + dev_err(dev, "%s: %s set optimum mode(ua=%d) failed, err=%d\n", + __func__, name, ua, ret); + } + + return ret; +} + +static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, + struct ufs_vreg *vreg) +{ + return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); +} + +static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, + struct ufs_vreg *vreg) +{ + return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); +} + static int ufshcd_config_vreg(struct device *dev, struct ufs_vreg *vreg, bool on) { @@ -3629,18 +3885,9 @@ static int ufshcd_config_vreg(struct device *dev, } uA_load = on ? vreg->max_uA : 0; - ret = regulator_set_optimum_mode(reg, uA_load); - if (ret >= 0) { - /* - * regulator_set_optimum_mode() returns new regulator - * mode upon success. - */ - ret = 0; - } else { - dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n", - __func__, name, uA_load, ret); + ret = ufshcd_config_vreg_load(dev, vreg, uA_load); + if (ret) goto out; - } } out: return ret; @@ -3776,7 +4023,8 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba) return 0; } -static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) +static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, + bool skip_ref_clk) { int ret = 0; struct ufs_clk_info *clki; @@ -3787,6 +4035,9 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) list_for_each_entry(clki, head, list) { if (!IS_ERR_OR_NULL(clki->clk)) { + if (skip_ref_clk && !strcmp(clki->name, "ref_clk")) + continue; + if (on && !clki->enabled) { ret = clk_prepare_enable(clki->clk); if (ret) { @@ -3812,6 +4063,11 @@ out: return ret; } +static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) +{ + return __ufshcd_setup_clocks(hba, on, false); +} + static int ufshcd_init_clocks(struct ufs_hba *hba) { int ret = 0; @@ -3968,68 +4224,532 @@ static void ufshcd_hba_exit(struct ufs_hba *hba) } } +static int +ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp) +{ + unsigned char cmd[6] = {REQUEST_SENSE, + 0, + 0, + 0, + SCSI_SENSE_BUFFERSIZE, + 0}; + char *buffer; + int ret; + + buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); + if (!buffer) { + ret = -ENOMEM; + goto out; + } + + ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer, + SCSI_SENSE_BUFFERSIZE, NULL, + msecs_to_jiffies(1000), 3, NULL, REQ_PM); + if (ret) + pr_err("%s: failed with err %d\n", __func__, ret); + + kfree(buffer); +out: + return ret; +} + +/** + * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device + * power mode + * @hba: per adapter instance + * @pwr_mode: device power mode to set + * + * Returns 0 if requested power mode is set successfully + * Returns non-zero if failed to set the requested power mode + */ +static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, + enum ufs_dev_pwr_mode pwr_mode) +{ + unsigned char cmd[6] = { 
START_STOP }; + struct scsi_sense_hdr sshdr; + struct scsi_device *sdp = hba->sdev_ufs_device; + int ret; + + if (!sdp || !scsi_device_online(sdp)) + return -ENODEV; + + /* + * If scsi commands fail, the scsi mid-layer schedules scsi error- + * handling, which would wait for host to be resumed. Since we know + * we are functional while we are here, skip host resume in error + * handling context. + */ + hba->host->eh_noresume = 1; + if (hba->wlun_dev_clr_ua) { + ret = ufshcd_send_request_sense(hba, sdp); + if (ret) + goto out; + /* Unit attention condition is cleared now */ + hba->wlun_dev_clr_ua = false; + } + + cmd[4] = pwr_mode << 4; + + /* + * Current function would be generally called from the power management + * callbacks hence set the REQ_PM flag so that it doesn't resume the + * already suspended childs. + */ + ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, + START_STOP_TIMEOUT, 0, NULL, REQ_PM); + if (ret) { + sdev_printk(KERN_WARNING, sdp, + "START_STOP failed for power mode: %d\n", pwr_mode); + scsi_show_result(ret); + if (driver_byte(ret) & DRIVER_SENSE) { + scsi_show_sense_hdr(&sshdr); + scsi_show_extd_sense(sshdr.asc, sshdr.ascq); + } + } + + if (!ret) + hba->curr_dev_pwr_mode = pwr_mode; +out: + hba->host->eh_noresume = 0; + return ret; +} + +static int ufshcd_link_state_transition(struct ufs_hba *hba, + enum uic_link_state req_link_state, + int check_for_bkops) +{ + int ret = 0; + + if (req_link_state == hba->uic_link_state) + return 0; + + if (req_link_state == UIC_LINK_HIBERN8_STATE) { + ret = ufshcd_uic_hibern8_enter(hba); + if (!ret) + ufshcd_set_link_hibern8(hba); + else + goto out; + } + /* + * If autobkops is enabled, link can't be turned off because + * turning off the link would also turn off the device. + */ + else if ((req_link_state == UIC_LINK_OFF_STATE) && + (!check_for_bkops || (check_for_bkops && + !hba->auto_bkops_enabled))) { + /* + * Change controller state to "reset state" which + * should also put the link in off/reset state + */ + ufshcd_hba_stop(hba); + /* + * TODO: Check if we need any delay to make sure that + * controller is reset + */ + ufshcd_set_link_off(hba); + } + +out: + return ret; +} + +static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) +{ + /* + * If UFS device is either in UFS_Sleep turn off VCC rail to save some + * power. + * + * If UFS device and link is in OFF state, all power supplies (VCC, + * VCCQ, VCCQ2) can be turned off if power on write protect is not + * required. If UFS link is inactive (Hibern8 or OFF state) and device + * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode. + * + * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway + * in low power state which would save some power. 
+ */ + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && + !hba->dev_info.is_lu_power_on_wp) { + ufshcd_setup_vreg(hba, false); + } else if (!ufshcd_is_ufs_dev_active(hba)) { + ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); + if (!ufshcd_is_link_active(hba)) { + ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); + ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); + } + } +} + +static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) +{ + int ret = 0; + + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && + !hba->dev_info.is_lu_power_on_wp) { + ret = ufshcd_setup_vreg(hba, true); + } else if (!ufshcd_is_ufs_dev_active(hba)) { + ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); + if (!ret && !ufshcd_is_link_active(hba)) { + ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); + if (ret) + goto vcc_disable; + ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); + if (ret) + goto vccq_lpm; + } + } + goto out; + +vccq_lpm: + ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); +vcc_disable: + ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); +out: + return ret; +} + +static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) +{ + if (ufshcd_is_link_off(hba)) + ufshcd_setup_hba_vreg(hba, false); +} + +static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) +{ + if (ufshcd_is_link_off(hba)) + ufshcd_setup_hba_vreg(hba, true); +} + /** - * ufshcd_suspend - suspend power management function + * ufshcd_suspend - helper function for suspend operations * @hba: per adapter instance - * @state: power state + * @pm_op: desired low power operation type + * + * This function will try to put the UFS device and link into low power + * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl" + * (System PM level). + * + * If this function is called during shutdown, it will make sure that + * both UFS device and UFS link is powered off. * - * Returns -ENOSYS + * NOTE: UFS device & link must be active before we enter in this function. + * + * Returns 0 for success and non-zero for failure */ -int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state) +static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) { + int ret = 0; + enum ufs_pm_level pm_lvl; + enum ufs_dev_pwr_mode req_dev_pwr_mode; + enum uic_link_state req_link_state; + + hba->pm_op_in_progress = 1; + if (!ufshcd_is_shutdown_pm(pm_op)) { + pm_lvl = ufshcd_is_runtime_pm(pm_op) ? + hba->rpm_lvl : hba->spm_lvl; + req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl); + req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl); + } else { + req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE; + req_link_state = UIC_LINK_OFF_STATE; + } + /* - * TODO: - * 1. Block SCSI requests from SCSI midlayer - * 2. Change the internal driver state to non operational - * 3. Set UTRLRSR and UTMRLRSR bits to zero - * 4. Wait until outstanding commands are completed - * 5. Set HCE to zero to send the UFS host controller to reset state + * If we can't transition into any of the low power modes + * just gate the clocks. 
*/ + if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && + req_link_state == UIC_LINK_ACTIVE_STATE) { + goto disable_clks; + } - return -ENOSYS; + if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && + (req_link_state == hba->uic_link_state)) + goto out; + + /* UFS device & link must be active before we enter in this function */ + if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { + ret = -EINVAL; + goto out; + } + + if (ufshcd_is_runtime_pm(pm_op)) { + /* + * The device is idle with no requests in the queue, + * allow background operations if needed. + */ + ret = ufshcd_bkops_ctrl(hba, BKOPS_STATUS_NON_CRITICAL); + if (ret) + goto out; + } + + if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) && + ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) || + !ufshcd_is_runtime_pm(pm_op))) { + /* ensure that bkops is disabled */ + ufshcd_disable_auto_bkops(hba); + ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); + if (ret) + goto out; + } + + ret = ufshcd_link_state_transition(hba, req_link_state, 1); + if (ret) + goto set_dev_active; + + ufshcd_vreg_set_lpm(hba); + +disable_clks: + /* + * Call vendor specific suspend callback. As these callbacks may access + * vendor specific host controller register space call them before the + * host clocks are ON. + */ + if (hba->vops && hba->vops->suspend) { + ret = hba->vops->suspend(hba, pm_op); + if (ret) + goto set_link_active; + } + + if (hba->vops && hba->vops->setup_clocks) { + ret = hba->vops->setup_clocks(hba, false); + if (ret) + goto vops_resume; + } + + if (!ufshcd_is_link_active(hba)) + ufshcd_setup_clocks(hba, false); + else + /* If link is active, device ref_clk can't be switched off */ + __ufshcd_setup_clocks(hba, false, true); + + /* + * Disable the host irq as host controller as there won't be any + * host controller trasanction expected till resume. + */ + ufshcd_disable_irq(hba); + /* Put the host controller in low power mode if possible */ + ufshcd_hba_vreg_set_lpm(hba); + goto out; + +vops_resume: + if (hba->vops && hba->vops->resume) + hba->vops->resume(hba, pm_op); +set_link_active: + ufshcd_vreg_set_hpm(hba); + if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) + ufshcd_set_link_active(hba); + else if (ufshcd_is_link_off(hba)) + ufshcd_host_reset_and_restore(hba); +set_dev_active: + if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) + ufshcd_disable_auto_bkops(hba); +out: + hba->pm_op_in_progress = 0; + return ret; } -EXPORT_SYMBOL_GPL(ufshcd_suspend); /** - * ufshcd_resume - resume power management function + * ufshcd_resume - helper function for resume operations * @hba: per adapter instance + * @pm_op: runtime PM or system PM * - * Returns -ENOSYS + * This function basically brings the UFS device, UniPro link and controller + * to active state. 
+ * + * Returns 0 for success and non-zero for failure */ -int ufshcd_resume(struct ufs_hba *hba) +static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) { + int ret; + enum uic_link_state old_link_state; + + hba->pm_op_in_progress = 1; + old_link_state = hba->uic_link_state; + + ufshcd_hba_vreg_set_hpm(hba); + /* Make sure clocks are enabled before accessing controller */ + ret = ufshcd_setup_clocks(hba, true); + if (ret) + goto out; + + if (hba->vops && hba->vops->setup_clocks) { + ret = hba->vops->setup_clocks(hba, true); + if (ret) + goto disable_clks; + } + + /* enable the host irq as host controller would be active soon */ + ret = ufshcd_enable_irq(hba); + if (ret) + goto disable_irq_and_vops_clks; + + ret = ufshcd_vreg_set_hpm(hba); + if (ret) + goto disable_irq_and_vops_clks; + /* - * TODO: - * 1. Set HCE to 1, to start the UFS host controller - * initialization process - * 2. Set UTRLRSR and UTMRLRSR bits to 1 - * 3. Change the internal driver state to operational - * 4. Unblock SCSI requests from SCSI midlayer + * Call vendor specific resume callback. As these callbacks may access + * vendor specific host controller register space call them when the + * host clocks are ON. */ + if (hba->vops && hba->vops->resume) { + ret = hba->vops->resume(hba, pm_op); + if (ret) + goto disable_vreg; + } + + if (ufshcd_is_link_hibern8(hba)) { + ret = ufshcd_uic_hibern8_exit(hba); + if (!ret) + ufshcd_set_link_active(hba); + else + goto vendor_suspend; + } else if (ufshcd_is_link_off(hba)) { + ret = ufshcd_host_reset_and_restore(hba); + /* + * ufshcd_host_reset_and_restore() should have already + * set the link state as active + */ + if (ret || !ufshcd_is_link_active(hba)) + goto vendor_suspend; + } + + if (!ufshcd_is_ufs_dev_active(hba)) { + ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); + if (ret) + goto set_old_link_state; + } + + ufshcd_disable_auto_bkops(hba); + goto out; + +set_old_link_state: + ufshcd_link_state_transition(hba, old_link_state, 0); +vendor_suspend: + if (hba->vops && hba->vops->suspend) + hba->vops->suspend(hba, pm_op); +disable_vreg: + ufshcd_vreg_set_lpm(hba); +disable_irq_and_vops_clks: + ufshcd_disable_irq(hba); + if (hba->vops && hba->vops->setup_clocks) + ret = hba->vops->setup_clocks(hba, false); +disable_clks: + ufshcd_setup_clocks(hba, false); +out: + hba->pm_op_in_progress = 0; + return ret; +} + +/** + * ufshcd_system_suspend - system suspend routine + * @hba: per adapter instance + * @pm_op: runtime PM or system PM + * + * Check the description of ufshcd_suspend() function for more details. + * + * Returns 0 for success and non-zero for failure + */ +int ufshcd_system_suspend(struct ufs_hba *hba) +{ + int ret = 0; + + if (!hba || !hba->is_powered) + goto out; + + if (pm_runtime_suspended(hba->dev)) { + if (hba->rpm_lvl == hba->spm_lvl) + /* + * There is possibility that device may still be in + * active state during the runtime suspend. + */ + if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) == + hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled) + goto out; + + /* + * UFS device and/or UFS link low power states during runtime + * suspend seems to be different than what is expected during + * system suspend. Hence runtime resume the devic & link and + * let the system suspend low power states to take effect. + * TODO: If resume takes longer time, we might have optimize + * it in future by not resuming everything if possible. 
+ */ + ret = ufshcd_runtime_resume(hba); + if (ret) + goto out; + } + + ret = ufshcd_suspend(hba, UFS_SYSTEM_PM); +out: + return ret; +} +EXPORT_SYMBOL(ufshcd_system_suspend); + +/** + * ufshcd_system_resume - system resume routine + * @hba: per adapter instance + * + * Returns 0 for success and non-zero for failure + */ - return -ENOSYS; +int ufshcd_system_resume(struct ufs_hba *hba) +{ + if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev)) + /* + * Let the runtime resume take care of resuming + * if runtime suspended. + */ + return 0; + + return ufshcd_resume(hba, UFS_SYSTEM_PM); } -EXPORT_SYMBOL_GPL(ufshcd_resume); +EXPORT_SYMBOL(ufshcd_system_resume); +/** + * ufshcd_runtime_suspend - runtime suspend routine + * @hba: per adapter instance + * + * Check the description of ufshcd_suspend() function for more details. + * + * Returns 0 for success and non-zero for failure + */ int ufshcd_runtime_suspend(struct ufs_hba *hba) { - if (!hba) + if (!hba || !hba->is_powered) return 0; - /* - * The device is idle with no requests in the queue, - * allow background operations. - */ - return ufshcd_enable_auto_bkops(hba); + return ufshcd_suspend(hba, UFS_RUNTIME_PM); } EXPORT_SYMBOL(ufshcd_runtime_suspend); +/** + * ufshcd_runtime_resume - runtime resume routine + * @hba: per adapter instance + * + * This function basically brings the UFS device, UniPro link and controller + * to active state. Following operations are done in this function: + * + * 1. Turn on all the controller related clocks + * 2. Bring the UniPro link out of Hibernate state + * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device + * to active state. + * 4. If auto-bkops is enabled on the device, disable it. + * + * So following would be the possible power state after this function return + * successfully: + * S1: UFS device in Active state with VCC rail ON + * UniPro link in Active state + * All the UFS/UniPro controller clocks are ON + * + * Returns 0 for success and non-zero for failure + */ int ufshcd_runtime_resume(struct ufs_hba *hba) { - if (!hba) + if (!hba || !hba->is_powered) return 0; - - return ufshcd_disable_auto_bkops(hba); + else + return ufshcd_resume(hba, UFS_RUNTIME_PM); } EXPORT_SYMBOL(ufshcd_runtime_resume); @@ -4039,6 +4759,36 @@ int ufshcd_runtime_idle(struct ufs_hba *hba) } EXPORT_SYMBOL(ufshcd_runtime_idle); +/** + * ufshcd_shutdown - shutdown routine + * @hba: per adapter instance + * + * This function would power off both UFS device and UFS link. + * + * Returns 0 always to allow force shutdown even in case of errors. 
+ */ +int ufshcd_shutdown(struct ufs_hba *hba) +{ + int ret = 0; + + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) + goto out; + + if (pm_runtime_suspended(hba->dev)) { + ret = ufshcd_runtime_resume(hba); + if (ret) + goto out; + } + + ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); +out: + if (ret) + dev_err(hba->dev, "%s failed, err %d\n", __func__, ret); + /* allow force shutdown even in case of errors */ + return 0; +} +EXPORT_SYMBOL(ufshcd_shutdown); + /** * ufshcd_remove - de-allocate SCSI host and host memory space * data structure memory @@ -4192,6 +4942,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) if (err) { dev_err(hba->dev, "request irq failed\n"); goto out_disable; + } else { + hba->is_irq_enabled = true; } /* Enable SCSI tag mapping */ @@ -4217,6 +4969,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) /* Hold auto suspend until async scan completes */ pm_runtime_get_sync(dev); + /* + * The device-initialize-sequence hasn't been invoked yet. + * Set the device to power-off state + */ + ufshcd_set_ufs_dev_poweroff(hba); + async_schedule(ufshcd_async_scan, hba); return 0; @@ -4224,6 +4982,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) out_remove_scsi_host: scsi_remove_host(hba->host); out_disable: + hba->is_irq_enabled = false; scsi_host_put(host); ufshcd_hba_exit(hba); out_error: diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 5c25337bfcc8..e1bde0598d92 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -96,6 +96,54 @@ struct uic_command { struct completion done; }; +/* Used to differentiate the power management options */ +enum ufs_pm_op { + UFS_RUNTIME_PM, + UFS_SYSTEM_PM, + UFS_SHUTDOWN_PM, +}; + +#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM) +#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM) +#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM) + +/* Host <-> Device UniPro Link state */ +enum uic_link_state { + UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */ + UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */ + UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */ +}; + +#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE) +#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \ + UIC_LINK_ACTIVE_STATE) +#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \ + UIC_LINK_HIBERN8_STATE) +#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE) +#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \ + UIC_LINK_ACTIVE_STATE) +#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \ + UIC_LINK_HIBERN8_STATE) + +/* + * UFS Power management levels. + * Each level is in increasing order of power savings. 
+ */ +enum ufs_pm_level { + UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */ + UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */ + UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */ + UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */ + UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */ + UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */ + UFS_PM_LVL_MAX +}; + +struct ufs_pm_lvl_states { + enum ufs_dev_pwr_mode dev_state; + enum uic_link_state link_state; +}; + /** * struct ufshcd_lrb - local reference block * @utr_descriptor_ptr: UTRD address of the command @@ -184,6 +232,8 @@ struct ufs_clk_info { * variant specific Uni-Pro initialization. * @link_startup_notify: called before and after Link startup is carried out * to allow variant specific Uni-Pro initialization. + * @suspend: called during host controller PM callback + * @resume: called during host controller PM callback */ struct ufs_hba_variant_ops { const char *name; @@ -193,6 +243,8 @@ struct ufs_hba_variant_ops { int (*setup_regulators)(struct ufs_hba *, bool); int (*hce_enable_notify)(struct ufs_hba *, bool); int (*link_startup_notify)(struct ufs_hba *, bool); + int (*suspend)(struct ufs_hba *, enum ufs_pm_op); + int (*resume)(struct ufs_hba *, enum ufs_pm_op); }; /** @@ -274,6 +326,14 @@ struct ufs_hba { struct scsi_device *sdev_rpmb; struct scsi_device *sdev_boot; + enum ufs_dev_pwr_mode curr_dev_pwr_mode; + enum uic_link_state uic_link_state; + /* Desired UFS power management level during runtime PM */ + enum ufs_pm_level rpm_lvl; + /* Desired UFS power management level during system PM */ + enum ufs_pm_level spm_lvl; + int pm_op_in_progress; + struct ufshcd_lrb *lrb; unsigned long lrb_in_use; @@ -287,16 +347,17 @@ struct ufs_hba { struct ufs_hba_variant_ops *vops; void *priv; unsigned int irq; + bool is_irq_enabled; - struct uic_command *active_uic_cmd; - struct mutex uic_cmd_mutex; wait_queue_head_t tm_wq; wait_queue_head_t tm_tag_wq; unsigned long tm_condition; unsigned long tm_slots_in_use; - struct completion *pwr_done; + struct uic_command *active_uic_cmd; + struct mutex uic_cmd_mutex; + struct completion *uic_async_done; u32 ufshcd_state; u32 eh_flags; @@ -319,9 +380,13 @@ struct ufs_hba { /* Device management request data */ struct ufs_dev_cmd dev_cmd; + /* Keeps information of the UFS device connected to this host */ + struct ufs_dev_info dev_info; bool auto_bkops_enabled; struct ufs_vreg_info vreg_info; struct list_head clk_list_head; + + bool wlun_dev_clr_ua; }; #define ufshcd_writel(hba, val, reg) \ @@ -348,11 +413,12 @@ static inline void check_upiu_size(void) GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE); } -extern int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state); -extern int ufshcd_resume(struct ufs_hba *hba); extern int ufshcd_runtime_suspend(struct ufs_hba *hba); extern int ufshcd_runtime_resume(struct ufs_hba *hba); extern int ufshcd_runtime_idle(struct ufs_hba *hba); +extern int ufshcd_system_suspend(struct ufs_hba *hba); +extern int ufshcd_system_resume(struct ufs_hba *hba); +extern int ufshcd_shutdown(struct ufs_hba *hba); extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set, u32 mib_val, u8 peer); extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index e1b844bc9460..d5721199e9cc 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -124,8 +124,11 @@ enum { #define CONTROLLER_FATAL_ERROR 
UFS_BIT(16) #define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17) -#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL |\ - UIC_POWER_MODE) +#define UFSHCD_UIC_PWR_MASK (UIC_HIBERNATE_ENTER |\ + UIC_HIBERNATE_EXIT |\ + UIC_POWER_MODE) + +#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK) #define UFSHCD_ERROR_MASK (UIC_ERROR |\ DEVICE_FATAL_ERROR |\ @@ -210,7 +213,7 @@ enum { #define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF) /* UIC Commands */ -enum { +enum uic_cmd_dme { UIC_CMD_DME_GET = 0x01, UIC_CMD_DME_SET = 0x02, UIC_CMD_DME_PEER_GET = 0x03, -- cgit From 7eb584db73bebbc9852a14341431ed6935419bec Mon Sep 17 00:00:00 2001 From: Dolev Raviv Date: Thu, 25 Sep 2014 15:32:31 +0300 Subject: ufs: refactor configuring power mode Sometimes, the device shall report its maximum power and speed capabilities, but we might not wish to configure it to use those maximum capabilities. This change adds support for the vendor specific host driver to implement power change notify callback. To enable configuring different power modes (number of lanes, gear number and fast/slow modes) it is necessary to split the configuration stage from the stage that reads the device max power mode. In addition, it is not required to read the configuration more than once, thus the configuration is stored after reading it once. Signed-off-by: Dolev Raviv Signed-off-by: Yaniv Gardi Signed-off-by: Christoph Hellwig --- drivers/scsi/ufs/ufshcd.c | 166 +++++++++++++++++++++++++++++++++++++--------- drivers/scsi/ufs/ufshcd.h | 27 ++++++++ 2 files changed, 160 insertions(+), 33 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index f2b50bc13ac9..8bbb37d7db41 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -179,6 +179,8 @@ static void ufshcd_hba_exit(struct ufs_hba *hba); static int ufshcd_probe_hba(struct ufs_hba *hba); static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); static irqreturn_t ufshcd_intr(int irq, void *__hba); +static int ufshcd_config_pwr_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *desired_pwr_mode); static inline int ufshcd_enable_irq(struct ufs_hba *hba) { @@ -1958,40 +1960,83 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) } /** - * ufshcd_config_max_pwr_mode - Set & Change power mode with - * maximum capability attribute information. - * @hba: per adapter instance - * - * Returns 0 on success, non-zero value on failure + * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device + * @hba: per-adapter instance */ -static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba) +static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) { - enum {RX = 0, TX = 1}; - u32 lanes[] = {1, 1}; - u32 gear[] = {1, 1}; - u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE}; - int ret; + struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; + + if (hba->max_pwr_info.is_valid) + return 0; + + pwr_info->pwr_tx = FASTAUTO_MODE; + pwr_info->pwr_rx = FASTAUTO_MODE; + pwr_info->hs_rate = PA_HS_MODE_B; /* Get the connected lane count */ - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]); - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]); + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), + &pwr_info->lane_rx); + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), + &pwr_info->lane_tx); + + if (!pwr_info->lane_rx || !pwr_info->lane_tx) { + dev_err(hba->dev, "%s: invalid connected lanes value. 
rx=%d, tx=%d\n", + __func__, + pwr_info->lane_rx, + pwr_info->lane_tx); + return -EINVAL; + } /* * First, get the maximum gears of HS speed. * If a zero value, it means there is no HSGEAR capability. * Then, get the maximum gears of PWM speed. */ - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]); - if (!gear[RX]) { - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]); - pwr[RX] = SLOWAUTO_MODE; + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); + if (!pwr_info->gear_rx) { + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), + &pwr_info->gear_rx); + if (!pwr_info->gear_rx) { + dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", + __func__, pwr_info->gear_rx); + return -EINVAL; + } + pwr_info->pwr_rx = SLOWAUTO_MODE; } - ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]); - if (!gear[TX]) { + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), + &pwr_info->gear_tx); + if (!pwr_info->gear_tx) { ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), - &gear[TX]); - pwr[TX] = SLOWAUTO_MODE; + &pwr_info->gear_tx); + if (!pwr_info->gear_tx) { + dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", + __func__, pwr_info->gear_tx); + return -EINVAL; + } + pwr_info->pwr_tx = SLOWAUTO_MODE; + } + + hba->max_pwr_info.is_valid = true; + return 0; +} + +static int ufshcd_change_power_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode) +{ + int ret; + + /* if already configured to the requested pwr_mode */ + if (pwr_mode->gear_rx == hba->pwr_info.gear_rx && + pwr_mode->gear_tx == hba->pwr_info.gear_tx && + pwr_mode->lane_rx == hba->pwr_info.lane_rx && + pwr_mode->lane_tx == hba->pwr_info.lane_tx && + pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && + pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && + pwr_mode->hs_rate == hba->pwr_info.hs_rate) { + dev_dbg(hba->dev, "%s: power already configured\n", __func__); + return 0; } /* @@ -2000,23 +2045,67 @@ static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba) * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, * - PA_HSSERIES */ - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]); - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]); - if (pwr[RX] == FASTAUTO_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), + pwr_mode->lane_rx); + if (pwr_mode->pwr_rx == FASTAUTO_MODE || + pwr_mode->pwr_rx == FAST_MODE) ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); + else + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE); - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]); - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]); - if (pwr[TX] == FASTAUTO_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), + pwr_mode->lane_tx); + if (pwr_mode->pwr_tx == FASTAUTO_MODE || + pwr_mode->pwr_tx == FAST_MODE) ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); + else + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE); - if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE) - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B); + if (pwr_mode->pwr_rx == FASTAUTO_MODE || + pwr_mode->pwr_tx == FASTAUTO_MODE || + pwr_mode->pwr_rx == FAST_MODE || + pwr_mode->pwr_tx == FAST_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), + pwr_mode->hs_rate); - ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]); - if (ret) + ret = ufshcd_uic_change_pwr_mode(hba, 
pwr_mode->pwr_rx << 4 + | pwr_mode->pwr_tx); + + if (ret) { dev_err(hba->dev, - "pwr_mode: power mode change failed %d\n", ret); + "%s: power mode change failed %d\n", __func__, ret); + } else { + if (hba->vops && hba->vops->pwr_change_notify) + hba->vops->pwr_change_notify(hba, + POST_CHANGE, NULL, pwr_mode); + + memcpy(&hba->pwr_info, pwr_mode, + sizeof(struct ufs_pa_layer_attr)); + } + + return ret; +} + +/** + * ufshcd_config_pwr_mode - configure a new power mode + * @hba: per-adapter instance + * @desired_pwr_mode: desired power configuration + */ +static int ufshcd_config_pwr_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *desired_pwr_mode) +{ + struct ufs_pa_layer_attr final_params = { 0 }; + int ret; + + if (hba->vops && hba->vops->pwr_change_notify) + hba->vops->pwr_change_notify(hba, + PRE_CHANGE, desired_pwr_mode, &final_params); + else + memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); + + ret = ufshcd_change_power_mode(hba, &final_params); return ret; } @@ -3757,7 +3846,16 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; hba->wlun_dev_clr_ua = true; - ufshcd_config_max_pwr_mode(hba); + if (ufshcd_get_max_pwr_mode(hba)) { + dev_err(hba->dev, + "%s: Failed getting max supported power mode\n", + __func__); + } else { + ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); + if (ret) + dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", + __func__, ret); + } /* * If we are in error handling context or in power management callbacks @@ -4920,6 +5018,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) host->unique_id = host->host_no; host->max_cmd_len = MAX_CDB_SIZE; + hba->max_pwr_info.is_valid = false; + /* Initailize wait queue for task management */ init_waitqueue_head(&hba->tm_wq); init_waitqueue_head(&hba->tm_tag_wq); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index e1bde0598d92..343b18a7a8b0 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -221,6 +221,22 @@ struct ufs_clk_info { #define PRE_CHANGE 0 #define POST_CHANGE 1 + +struct ufs_pa_layer_attr { + u32 gear_rx; + u32 gear_tx; + u32 lane_rx; + u32 lane_tx; + u32 pwr_rx; + u32 pwr_tx; + u32 hs_rate; +}; + +struct ufs_pwr_mode_info { + bool is_valid; + struct ufs_pa_layer_attr info; +}; + /** * struct ufs_hba_variant_ops - variant specific callbacks * @name: variant name @@ -232,6 +248,9 @@ struct ufs_clk_info { * variant specific Uni-Pro initialization. * @link_startup_notify: called before and after Link startup is carried out * to allow variant specific Uni-Pro initialization. + * @pwr_change_notify: called before and after a power mode change + * is carried out to allow vendor spesific capabilities + * to be set. 
* @suspend: called during host controller PM callback * @resume: called during host controller PM callback */ @@ -243,6 +262,9 @@ struct ufs_hba_variant_ops { int (*setup_regulators)(struct ufs_hba *, bool); int (*hce_enable_notify)(struct ufs_hba *, bool); int (*link_startup_notify)(struct ufs_hba *, bool); + int (*pwr_change_notify)(struct ufs_hba *, + bool, struct ufs_pa_layer_attr *, + struct ufs_pa_layer_attr *); int (*suspend)(struct ufs_hba *, enum ufs_pm_op); int (*resume)(struct ufs_hba *, enum ufs_pm_op); }; @@ -302,6 +324,8 @@ struct ufs_init_prefetch { * @auto_bkops_enabled: to track whether bkops is enabled in device * @vreg_info: UFS device voltage regulator information * @clk_list_head: UFS host controller clocks list node head + * @pwr_info: holds current power mode + * @max_pwr_info: keeps the device max valid pwm */ struct ufs_hba { void __iomem *mmio_base; @@ -387,6 +411,9 @@ struct ufs_hba { struct list_head clk_list_head; bool wlun_dev_clr_ua; + + struct ufs_pa_layer_attr pwr_info; + struct ufs_pwr_mode_info max_pwr_info; }; #define ufshcd_writel(hba, val, reg) \ -- cgit From 1ab27c9cf8b63dd8dec9e17b5c17721c7f3b6cc7 Mon Sep 17 00:00:00 2001 From: Sahitya Tummala Date: Thu, 25 Sep 2014 15:32:32 +0300 Subject: ufs: Add support for clock gating The UFS controller clocks can be gated after certain period of inactivity, which is typically less than runtime suspend timeout. In addition to clocks the link will also be put into Hibern8 mode to save more power. The clock gating can be turned on by enabling the capability UFSHCD_CAP_CLK_GATING. To enable entering into Hibern8 mode as part of clock gating, set the capability UFSHCD_CAP_HIBERN8_WITH_CLK_GATING. The tracing events for clock gating can be enabled through debugfs as: echo 1 > /sys/kernel/debug/tracing/events/ufs/ufshcd_clk_gating/enable cat /sys/kernel/debug/tracing/trace_pipe Signed-off-by: Sahitya Tummala Signed-off-by: Dolev Raviv Signed-off-by: Christoph Hellwig --- drivers/scsi/ufs/ufshcd.c | 323 ++++++++++++++++++++++++++++++++++++++++++---- drivers/scsi/ufs/ufshcd.h | 51 ++++++++ 2 files changed, 349 insertions(+), 25 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 8bbb37d7db41..6f1ea5192db6 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -177,6 +177,11 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba); static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); static void ufshcd_hba_exit(struct ufs_hba *hba); static int ufshcd_probe_hba(struct ufs_hba *hba); +static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, + bool skip_ref_clk); +static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); +static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); +static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); static irqreturn_t ufshcd_intr(int irq, void *__hba); static int ufshcd_config_pwr_mode(struct ufs_hba *hba, @@ -507,6 +512,231 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba) return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 
0 : 1; } +static void ufshcd_ungate_work(struct work_struct *work) +{ + int ret; + unsigned long flags; + struct ufs_hba *hba = container_of(work, struct ufs_hba, + clk_gating.ungate_work); + + cancel_delayed_work_sync(&hba->clk_gating.gate_work); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->clk_gating.state == CLKS_ON) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + goto unblock_reqs; + } + + spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_setup_clocks(hba, true); + + /* Exit from hibern8 */ + if (ufshcd_can_hibern8_during_gating(hba)) { + /* Prevent gating in this path */ + hba->clk_gating.is_suspended = true; + if (ufshcd_is_link_hibern8(hba)) { + ret = ufshcd_uic_hibern8_exit(hba); + if (ret) + dev_err(hba->dev, "%s: hibern8 exit failed %d\n", + __func__, ret); + else + ufshcd_set_link_active(hba); + } + hba->clk_gating.is_suspended = false; + } +unblock_reqs: + scsi_unblock_requests(hba->host); +} + +/** + * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release. + * Also, exit from hibern8 mode and set the link as active. + * @hba: per adapter instance + * @async: This indicates whether caller should ungate clocks asynchronously. + */ +int ufshcd_hold(struct ufs_hba *hba, bool async) +{ + int rc = 0; + unsigned long flags; + + if (!ufshcd_is_clkgating_allowed(hba)) + goto out; +start: + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_gating.active_reqs++; + + switch (hba->clk_gating.state) { + case CLKS_ON: + break; + case REQ_CLKS_OFF: + if (cancel_delayed_work(&hba->clk_gating.gate_work)) { + hba->clk_gating.state = CLKS_ON; + break; + } + /* + * If we here, it means gating work is either done or + * currently running. Hence, fall through to cancel gating + * work and to enable clocks. + */ + case CLKS_OFF: + scsi_block_requests(hba->host); + hba->clk_gating.state = REQ_CLKS_ON; + schedule_work(&hba->clk_gating.ungate_work); + /* + * fall through to check if we should wait for this + * work to be done or not. 
+ */ + case REQ_CLKS_ON: + if (async) { + rc = -EAGAIN; + hba->clk_gating.active_reqs--; + break; + } + + spin_unlock_irqrestore(hba->host->host_lock, flags); + flush_work(&hba->clk_gating.ungate_work); + /* Make sure state is CLKS_ON before returning */ + goto start; + default: + dev_err(hba->dev, "%s: clk gating is in invalid state %d\n", + __func__, hba->clk_gating.state); + break; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); +out: + return rc; +} + +static void ufshcd_gate_work(struct work_struct *work) +{ + struct ufs_hba *hba = container_of(work, struct ufs_hba, + clk_gating.gate_work.work); + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->clk_gating.is_suspended) { + hba->clk_gating.state = CLKS_ON; + goto rel_lock; + } + + if (hba->clk_gating.active_reqs + || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL + || hba->lrb_in_use || hba->outstanding_tasks + || hba->active_uic_cmd || hba->uic_async_done) + goto rel_lock; + + spin_unlock_irqrestore(hba->host->host_lock, flags); + + /* put the link into hibern8 mode before turning off clocks */ + if (ufshcd_can_hibern8_during_gating(hba)) { + if (ufshcd_uic_hibern8_enter(hba)) { + hba->clk_gating.state = CLKS_ON; + goto out; + } + ufshcd_set_link_hibern8(hba); + } + + if (!ufshcd_is_link_active(hba)) + ufshcd_setup_clocks(hba, false); + else + /* If link is active, device ref_clk can't be switched off */ + __ufshcd_setup_clocks(hba, false, true); + + /* + * In case you are here to cancel this work the gating state + * would be marked as REQ_CLKS_ON. In this case keep the state + * as REQ_CLKS_ON which would anyway imply that clocks are off + * and a request to turn them on is pending. By doing this way, + * we keep the state machine in tact and this would ultimately + * prevent from doing cancel work multiple times when there are + * new requests arriving before the current cancel work is done. 
+ */ + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->clk_gating.state == REQ_CLKS_OFF) + hba->clk_gating.state = CLKS_OFF; + +rel_lock: + spin_unlock_irqrestore(hba->host->host_lock, flags); +out: + return; +} + +/* host lock must be held before calling this variant */ +static void __ufshcd_release(struct ufs_hba *hba) +{ + if (!ufshcd_is_clkgating_allowed(hba)) + return; + + hba->clk_gating.active_reqs--; + + if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended + || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL + || hba->lrb_in_use || hba->outstanding_tasks + || hba->active_uic_cmd || hba->uic_async_done) + return; + + hba->clk_gating.state = REQ_CLKS_OFF; + schedule_delayed_work(&hba->clk_gating.gate_work, + msecs_to_jiffies(hba->clk_gating.delay_ms)); +} + +void ufshcd_release(struct ufs_hba *hba) +{ + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + __ufshcd_release(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + +static ssize_t ufshcd_clkgate_delay_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms); +} + +static ssize_t ufshcd_clkgate_delay_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + unsigned long flags, value; + + if (kstrtoul(buf, 0, &value)) + return -EINVAL; + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_gating.delay_ms = value; + spin_unlock_irqrestore(hba->host->host_lock, flags); + return count; +} + +static void ufshcd_init_clk_gating(struct ufs_hba *hba) +{ + if (!ufshcd_is_clkgating_allowed(hba)) + return; + + hba->clk_gating.delay_ms = 150; + INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); + INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); + + hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; + hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; + sysfs_attr_init(&hba->clk_gating.delay_attr.attr); + hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; + hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR; + if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) + dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); +} + +static void ufshcd_exit_clk_gating(struct ufs_hba *hba) +{ + if (!ufshcd_is_clkgating_allowed(hba)) + return; + device_remove_file(hba->dev, &hba->clk_gating.delay_attr); +} + /** * ufshcd_send_command - Send SCSI or device management commands * @hba: per adapter instance @@ -702,6 +932,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) int ret; unsigned long flags; + ufshcd_hold(hba, false); mutex_lock(&hba->uic_cmd_mutex); spin_lock_irqsave(hba->host->host_lock, flags); ret = __ufshcd_send_uic_cmd(hba, uic_cmd); @@ -711,6 +942,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) mutex_unlock(&hba->uic_cmd_mutex); + ufshcd_release(hba); return ret; } @@ -1037,6 +1269,14 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) goto out; } + err = ufshcd_hold(hba, true); + if (err) { + err = SCSI_MLQUEUE_HOST_BUSY; + clear_bit_unlock(tag, &hba->lrb_in_use); + goto out; + } + WARN_ON(hba->clk_gating.state != CLKS_ON); + lrbp = &hba->lrb[tag]; WARN_ON(lrbp->cmd); @@ -1312,6 +1552,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, BUG_ON(!hba); + ufshcd_hold(hba, 
false); mutex_lock(&hba->dev_cmd.lock); ufshcd_init_query(hba, &request, &response, opcode, idn, index, selector); @@ -1355,6 +1596,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, out_unlock: mutex_unlock(&hba->dev_cmd.lock); + ufshcd_release(hba); return err; } @@ -1378,6 +1620,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, BUG_ON(!hba); + ufshcd_hold(hba, false); if (!attr_val) { dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", __func__, opcode); @@ -1417,6 +1660,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, out_unlock: mutex_unlock(&hba->dev_cmd.lock); out: + ufshcd_release(hba); return err; } @@ -1444,6 +1688,7 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba, BUG_ON(!hba); + ufshcd_hold(hba, false); if (!desc_buf) { dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", __func__, opcode); @@ -1493,6 +1738,7 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba, out_unlock: mutex_unlock(&hba->dev_cmd.lock); out: + ufshcd_release(hba); return err; } @@ -1913,6 +2159,7 @@ out: hba->uic_async_done = NULL; spin_unlock_irqrestore(hba->host->host_lock, flags); mutex_unlock(&hba->uic_cmd_mutex); + return ret; } @@ -1927,12 +2174,16 @@ out: static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) { struct uic_command uic_cmd = {0}; + int ret; uic_cmd.command = UIC_CMD_DME_SET; uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); uic_cmd.argument3 = mode; + ufshcd_hold(hba, false); + ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); + ufshcd_release(hba); - return ufshcd_uic_pwr_ctrl(hba, &uic_cmd); + return ret; } static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) @@ -2354,6 +2605,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba) int err = 0; int retries; + ufshcd_hold(hba, false); mutex_lock(&hba->dev_cmd.lock); for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, @@ -2365,6 +2617,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba) dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); } mutex_unlock(&hba->dev_cmd.lock); + ufshcd_release(hba); if (err) dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); @@ -2764,6 +3017,7 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) clear_bit_unlock(index, &hba->lrb_in_use); /* Do not touch lrbp after scsi done */ cmd->scsi_done(cmd); + __ufshcd_release(hba); } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) { if (hba->dev_cmd.complete) complete(hba->dev_cmd.complete); @@ -3048,6 +3302,7 @@ static void ufshcd_err_handler(struct work_struct *work) hba = container_of(work, struct ufs_hba, eh_work); pm_runtime_get_sync(hba->dev); + ufshcd_hold(hba, false); spin_lock_irqsave(hba->host->host_lock, flags); if (hba->ufshcd_state == UFSHCD_STATE_RESET) { @@ -3101,6 +3356,7 @@ static void ufshcd_err_handler(struct work_struct *work) out: scsi_unblock_requests(hba->host); + ufshcd_release(hba); pm_runtime_put_sync(hba->dev); } @@ -3284,6 +3540,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, * the maximum wait time is bounded by %TM_CMD_TIMEOUT. 
*/ wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); + ufshcd_hold(hba, false); spin_lock_irqsave(host->host_lock, flags); task_req_descp = hba->utmrdl_base_addr; @@ -3335,6 +3592,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, ufshcd_put_tm_slot(hba, free_slot); wake_up(&hba->tm_tag_wq); + ufshcd_release(hba); return err; } @@ -3417,6 +3675,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) hba = shost_priv(host); tag = cmd->request->tag; + ufshcd_hold(hba, false); /* If command is already aborted/completed, return SUCCESS */ if (!(test_bit(tag, &hba->outstanding_reqs))) goto out; @@ -3481,6 +3740,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) clear_bit_unlock(tag, &hba->lrb_in_use); wake_up(&hba->dev_cmd.tag_wq); + out: if (!err) { err = SUCCESS; @@ -3489,6 +3749,11 @@ out: err = FAILED; } + /* + * This ufshcd_release() corresponds to the original scsi cmd that got + * aborted here (as we won't get any IRQ for it). + */ + ufshcd_release(hba); return err; } @@ -3573,6 +3838,7 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) hba = shost_priv(cmd->device->host); + ufshcd_hold(hba, false); /* * Check if there is any race with fatal error handling. * If so, wait for it to complete. Even though fatal error @@ -3606,6 +3872,7 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) ufshcd_clear_eh_in_progress(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_release(hba); return err; } @@ -3925,6 +4192,7 @@ static struct scsi_host_template ufshcd_driver_template = { .sg_tablesize = SG_ALL, .cmd_per_lun = UFSHCD_CMD_PER_LUN, .can_queue = UFSHCD_CAN_QUEUE, + .max_host_blocked = 1, }; static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, @@ -4127,6 +4395,7 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, int ret = 0; struct ufs_clk_info *clki; struct list_head *head = &hba->clk_list_head; + unsigned long flags; if (!head || list_empty(head)) goto out; @@ -4151,12 +4420,19 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, clki->name, on ? "en" : "dis"); } } + + if (hba->vops && hba->vops->setup_clocks) + ret = hba->vops->setup_clocks(hba, on); out: if (ret) { list_for_each_entry(clki, head, list) { if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) clk_disable_unprepare(clki->clk); } + } else if (!ret && on) { + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_gating.state = CLKS_ON; + spin_unlock_irqrestore(hba->host->host_lock, flags); } return ret; } @@ -4217,23 +4493,14 @@ static int ufshcd_variant_hba_init(struct ufs_hba *hba) goto out; } - if (hba->vops->setup_clocks) { - err = hba->vops->setup_clocks(hba, true); - if (err) - goto out_exit; - } - if (hba->vops->setup_regulators) { err = hba->vops->setup_regulators(hba, true); if (err) - goto out_clks; + goto out_exit; } goto out; -out_clks: - if (hba->vops->setup_clocks) - hba->vops->setup_clocks(hba, false); out_exit: if (hba->vops->exit) hba->vops->exit(hba); @@ -4555,6 +4822,9 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) * If we can't transition into any of the low power modes * just gate the clocks. 
*/ + ufshcd_hold(hba, false); + hba->clk_gating.is_suspended = true; + if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && req_link_state == UIC_LINK_ACTIVE_STATE) { goto disable_clks; @@ -4577,7 +4847,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) */ ret = ufshcd_bkops_ctrl(hba, BKOPS_STATUS_NON_CRITICAL); if (ret) - goto out; + goto enable_gating; } if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) && @@ -4587,7 +4857,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) ufshcd_disable_auto_bkops(hba); ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); if (ret) - goto out; + goto enable_gating; } ret = ufshcd_link_state_transition(hba, req_link_state, 1); @@ -4620,6 +4890,7 @@ disable_clks: /* If link is active, device ref_clk can't be switched off */ __ufshcd_setup_clocks(hba, false, true); + hba->clk_gating.state = CLKS_OFF; /* * Disable the host irq as host controller as there won't be any * host controller trasanction expected till resume. @@ -4641,6 +4912,9 @@ set_link_active: set_dev_active: if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) ufshcd_disable_auto_bkops(hba); +enable_gating: + hba->clk_gating.is_suspended = false; + ufshcd_release(hba); out: hba->pm_op_in_progress = 0; return ret; @@ -4670,12 +4944,6 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) if (ret) goto out; - if (hba->vops && hba->vops->setup_clocks) { - ret = hba->vops->setup_clocks(hba, true); - if (ret) - goto disable_clks; - } - /* enable the host irq as host controller would be active soon */ ret = ufshcd_enable_irq(hba); if (ret) @@ -4719,6 +4987,10 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) } ufshcd_disable_auto_bkops(hba); + hba->clk_gating.is_suspended = false; + + /* Schedule clock gating in case of no access to UFS device yet */ + ufshcd_release(hba); goto out; set_old_link_state: @@ -4730,9 +5002,6 @@ disable_vreg: ufshcd_vreg_set_lpm(hba); disable_irq_and_vops_clks: ufshcd_disable_irq(hba); - if (hba->vops && hba->vops->setup_clocks) - ret = hba->vops->setup_clocks(hba, false); -disable_clks: ufshcd_setup_clocks(hba, false); out: hba->pm_op_in_progress = 0; @@ -4902,6 +5171,7 @@ void ufshcd_remove(struct ufs_hba *hba) scsi_host_put(hba->host); + ufshcd_exit_clk_gating(hba); ufshcd_hba_exit(hba); } EXPORT_SYMBOL_GPL(ufshcd_remove); @@ -5037,11 +5307,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) /* Initialize device management tag acquire wait queue */ init_waitqueue_head(&hba->dev_cmd.tag_wq); + ufshcd_init_clk_gating(hba); /* IRQ registration */ err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); if (err) { dev_err(hba->dev, "request irq failed\n"); - goto out_disable; + goto exit_gating; } else { hba->is_irq_enabled = true; } @@ -5050,13 +5321,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) err = scsi_init_shared_tag_map(host, host->can_queue); if (err) { dev_err(hba->dev, "init shared queue failed\n"); - goto out_disable; + goto exit_gating; } err = scsi_add_host(host, hba->dev); if (err) { dev_err(hba->dev, "scsi_add_host failed\n"); - goto out_disable; + goto exit_gating; } /* Host controller enable */ @@ -5081,6 +5352,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) out_remove_scsi_host: scsi_remove_host(hba->host); +exit_gating: + ufshcd_exit_clk_gating(hba); out_disable: hba->is_irq_enabled = false; scsi_host_put(host); diff --git a/drivers/scsi/ufs/ufshcd.h 
b/drivers/scsi/ufs/ufshcd.h index 343b18a7a8b0..29d34d3aa5ee 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -269,6 +269,38 @@ struct ufs_hba_variant_ops { int (*resume)(struct ufs_hba *, enum ufs_pm_op); }; +/* clock gating state */ +enum clk_gating_state { + CLKS_OFF, + CLKS_ON, + REQ_CLKS_OFF, + REQ_CLKS_ON, +}; + +/** + * struct ufs_clk_gating - UFS clock gating related info + * @gate_work: worker to turn off clocks after some delay as specified in + * delay_ms + * @ungate_work: worker to turn on clocks that will be used in case of + * interrupt context + * @state: the current clocks state + * @delay_ms: gating delay in ms + * @is_suspended: clk gating is suspended when set to 1 which can be used + * during suspend/resume + * @delay_attr: sysfs attribute to control delay_attr + * @active_reqs: number of requests that are pending and should be waited for + * completion before gating clocks. + */ +struct ufs_clk_gating { + struct delayed_work gate_work; + struct work_struct ungate_work; + enum clk_gating_state state; + unsigned long delay_ms; + bool is_suspended; + struct device_attribute delay_attr; + int active_reqs; +}; + /** * struct ufs_init_prefetch - contains data that is pre-fetched once during * initialization @@ -414,8 +446,25 @@ struct ufs_hba { struct ufs_pa_layer_attr pwr_info; struct ufs_pwr_mode_info max_pwr_info; + + struct ufs_clk_gating clk_gating; + /* Control to enable/disable host capabilities */ + u32 caps; + /* Allow dynamic clk gating */ +#define UFSHCD_CAP_CLK_GATING (1 << 0) + /* Allow hiberb8 with clk gating */ +#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1) }; +/* Returns true if clocks can be gated. Otherwise false */ +static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba) +{ + return hba->caps & UFSHCD_CAP_CLK_GATING; +} +static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba) +{ + return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; +} #define ufshcd_writel(hba, val, reg) \ writel((val), (hba)->mmio_base + (reg)) #define ufshcd_readl(hba, reg) \ @@ -497,4 +546,6 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba, return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER); } +int ufshcd_hold(struct ufs_hba *hba, bool async); +void ufshcd_release(struct ufs_hba *hba); #endif /* End of Header */ -- cgit From 4cff6d991e4a291cf50fe2659da2ea9ad46620bf Mon Sep 17 00:00:00 2001 From: Sahitya Tummala Date: Thu, 25 Sep 2014 15:32:33 +0300 Subject: ufs: Add freq-table-hz property for UFS device Add freq-table-hz propery for UFS device to keep track of frequencies supported by UFS clocks. Signed-off-by: Sahitya Tummala Signed-off-by: Dolev Raviv Signed-off-by: Christoph Hellwig --- .../devicetree/bindings/ufs/ufshcd-pltfrm.txt | 12 +++--- drivers/scsi/ufs/ufshcd-pltfrm.c | 48 ++++++++++++++++------ drivers/scsi/ufs/ufshcd.h | 2 + 3 files changed, 43 insertions(+), 19 deletions(-) diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt index fb1234e0532c..53579197eca2 100644 --- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt +++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt @@ -26,11 +26,11 @@ Optional properties: - clocks : List of phandle and clock specifier pairs - clock-names : List of clock input name strings sorted in the same order as the clocks property. -- max-clock-frequency-hz : List of maximum operating frequency stored in the same - order as the clocks property. 
If this property is not - defined or a value in the array is "0" then it is assumed - that the frequency is set by the parent clock or a - fixed rate clock source. +- freq-table-hz : Array of operating frequencies stored in the same + order as the clocks property. If this property is not + defined or a value in the array is "0" then it is assumed + that the frequency is set by the parent clock or a + fixed rate clock source. Note: If above properties are not defined it can be assumed that the supply regulators or clocks are always on. @@ -53,5 +53,5 @@ Example: clocks = <&core 0>, <&ref 0>, <&iface 0>; clock-names = "core_clk", "ref_clk", "iface_clk"; - max-clock-frequency-hz = <100000000 19200000 0>; + freq-table-hz = <100000000 200000000>, <0 0>, <0 0>; }; diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 2482bbac3681..8adf067ff019 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -63,6 +63,8 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) char *name; u32 *clkfreq = NULL; struct ufs_clk_info *clki; + int len = 0; + size_t sz = 0; if (!np) goto out; @@ -82,39 +84,59 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) if (cnt <= 0) goto out; - clkfreq = kzalloc(cnt * sizeof(*clkfreq), GFP_KERNEL); + if (!of_get_property(np, "freq-table-hz", &len)) { + dev_info(dev, "freq-table-hz property not specified\n"); + goto out; + } + + if (len <= 0) + goto out; + + sz = len / sizeof(*clkfreq); + if (sz != 2 * cnt) { + dev_err(dev, "%s len mismatch\n", "freq-table-hz"); + ret = -EINVAL; + goto out; + } + + clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq), + GFP_KERNEL); if (!clkfreq) { + dev_err(dev, "%s: no memory\n", "freq-table-hz"); ret = -ENOMEM; - dev_err(dev, "%s: memory alloc failed\n", __func__); goto out; } - ret = of_property_read_u32_array(np, - "max-clock-frequency-hz", clkfreq, cnt); + ret = of_property_read_u32_array(np, "freq-table-hz", + clkfreq, sz); if (ret && (ret != -EINVAL)) { - dev_err(dev, "%s: invalid max-clock-frequency-hz property, %d\n", - __func__, ret); - goto out; + dev_err(dev, "%s: error reading array %d\n", + "freq-table-hz", ret); + goto free_clkfreq; } - for (i = 0; i < cnt; i++) { + for (i = 0; i < sz; i += 2) { ret = of_property_read_string_index(np, - "clock-names", i, (const char **)&name); + "clock-names", i/2, (const char **)&name); if (ret) - goto out; + goto free_clkfreq; clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL); if (!clki) { ret = -ENOMEM; - goto out; + goto free_clkfreq; } - clki->max_freq = clkfreq[i]; + clki->min_freq = clkfreq[i]; + clki->max_freq = clkfreq[i+1]; clki->name = kstrdup(name, GFP_KERNEL); + dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz", + clki->min_freq, clki->max_freq, clki->name); list_add_tail(&clki->list, &hba->clk_list_head); } -out: +free_clkfreq: kfree(clkfreq); +out: return ret; } diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 29d34d3aa5ee..ac72a6daf50e 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -209,6 +209,7 @@ struct ufs_dev_cmd { * @clk: clock node * @name: clock name * @max_freq: maximum frequency supported by the clock + * @min_freq: min frequency that can be used for clock scaling * @enabled: variable to check against multiple enable/disable */ struct ufs_clk_info { @@ -216,6 +217,7 @@ struct ufs_clk_info { struct clk *clk; const char *name; u32 max_freq; + u32 min_freq; bool enabled; }; -- cgit From 856b348305c98d4e0c8e5eafa97c61443197f8d3 Mon Sep 17 
00:00:00 2001 From: Sahitya Tummala Date: Thu, 25 Sep 2014 15:32:34 +0300 Subject: ufs: Add support for clock scaling using devfreq framework The clocks for UFS device will be managed by generic DVFS (Dynamic Voltage and Frequency Scaling) framework within kernel. This devfreq framework works with different governors to scale the clocks. By default, UFS devices uses simple_ondemand governor which scales the clocks up if the load is more than upthreshold and scales down if the load is less than downthreshold. Signed-off-by: Sahitya Tummala Signed-off-by: Dolev Raviv Signed-off-by: Christoph Hellwig --- drivers/scsi/ufs/Kconfig | 2 + drivers/scsi/ufs/ufshcd.c | 178 +++++++++++++++++++++++++++++++++++++++++++++- drivers/scsi/ufs/ufshcd.h | 20 ++++++ 3 files changed, 199 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index f07f90179bbc..6e07b2afddeb 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -35,6 +35,8 @@ config SCSI_UFSHCD tristate "Universal Flash Storage Controller Driver Core" depends on SCSI && SCSI_DMA + select PM_DEVFREQ + select DEVFREQ_GOV_SIMPLE_ONDEMAND ---help--- This selects the support for UFS devices in Linux, say Y and make sure that you know the name of your UFS host adapter (the card diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 6f1ea5192db6..df525e3b2c19 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -38,6 +38,7 @@ */ #include +#include #include "ufshcd.h" #include "unipro.h" @@ -545,6 +546,8 @@ static void ufshcd_ungate_work(struct work_struct *work) hba->clk_gating.is_suspended = false; } unblock_reqs: + if (ufshcd_is_clkscaling_enabled(hba)) + devfreq_resume_device(hba->devfreq); scsi_unblock_requests(hba->host); } @@ -561,10 +564,10 @@ int ufshcd_hold(struct ufs_hba *hba, bool async) if (!ufshcd_is_clkgating_allowed(hba)) goto out; -start: spin_lock_irqsave(hba->host->host_lock, flags); hba->clk_gating.active_reqs++; +start: switch (hba->clk_gating.state) { case CLKS_ON: break; @@ -596,6 +599,7 @@ start: spin_unlock_irqrestore(hba->host->host_lock, flags); flush_work(&hba->clk_gating.ungate_work); /* Make sure state is CLKS_ON before returning */ + spin_lock_irqsave(hba->host->host_lock, flags); goto start; default: dev_err(hba->dev, "%s: clk gating is in invalid state %d\n", @@ -636,6 +640,11 @@ static void ufshcd_gate_work(struct work_struct *work) ufshcd_set_link_hibern8(hba); } + if (ufshcd_is_clkscaling_enabled(hba)) { + devfreq_suspend_device(hba->devfreq); + hba->clk_scaling.window_start_t = 0; + } + if (!ufshcd_is_link_active(hba)) ufshcd_setup_clocks(hba, false); else @@ -737,6 +746,32 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba) device_remove_file(hba->dev, &hba->clk_gating.delay_attr); } +/* Must be called with host lock acquired */ +static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) +{ + if (!ufshcd_is_clkscaling_enabled(hba)) + return; + + if (!hba->clk_scaling.is_busy_started) { + hba->clk_scaling.busy_start_t = ktime_get(); + hba->clk_scaling.is_busy_started = true; + } +} + +static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) +{ + struct ufs_clk_scaling *scaling = &hba->clk_scaling; + + if (!ufshcd_is_clkscaling_enabled(hba)) + return; + + if (!hba->outstanding_reqs && scaling->is_busy_started) { + scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), + scaling->busy_start_t)); + scaling->busy_start_t = ktime_set(0, 0); + scaling->is_busy_started = false; + } +} /** * 
ufshcd_send_command - Send SCSI or device management commands * @hba: per adapter instance @@ -745,6 +780,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba) static inline void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) { + ufshcd_clk_scaling_start_busy(hba); __set_bit(task_tag, &hba->outstanding_reqs); ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); } @@ -3027,6 +3063,8 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) /* clear corresponding bits of completed commands */ hba->outstanding_reqs ^= completed_reqs; + ufshcd_clk_scaling_update_busy(hba); + /* we might have free'd some tags above */ wake_up(&hba->dev_cmd.tag_wq); } @@ -4151,6 +4189,10 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (!hba->is_init_prefetch) hba->is_init_prefetch = true; + /* Resume devfreq after UFS device is detected */ + if (ufshcd_is_clkscaling_enabled(hba)) + devfreq_resume_device(hba->devfreq); + out: /* * If we failed to initialize the device or the device is not @@ -4472,6 +4514,7 @@ static int ufshcd_init_clocks(struct ufs_hba *hba) clki->max_freq, ret); goto out; } + clki->curr_freq = clki->max_freq; } dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__, clki->name, clk_get_rate(clki->clk)); @@ -4867,6 +4910,15 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) ufshcd_vreg_set_lpm(hba); disable_clks: + /* + * The clock scaling needs access to controller registers. Hence, Wait + * for pending clock scaling work to be done before clocks are + * turned off. + */ + if (ufshcd_is_clkscaling_enabled(hba)) { + devfreq_suspend_device(hba->devfreq); + hba->clk_scaling.window_start_t = 0; + } /* * Call vendor specific suspend callback. As these callbacks may access * vendor specific host controller register space call them before the @@ -4989,6 +5041,9 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) ufshcd_disable_auto_bkops(hba); hba->clk_gating.is_suspended = false; + if (ufshcd_is_clkscaling_enabled(hba)) + devfreq_resume_device(hba->devfreq); + /* Schedule clock gating in case of no access to UFS device yet */ ufshcd_release(hba); goto out; @@ -5172,6 +5227,8 @@ void ufshcd_remove(struct ufs_hba *hba) scsi_host_put(hba->host); ufshcd_exit_clk_gating(hba); + if (ufshcd_is_clkscaling_enabled(hba)) + devfreq_remove_device(hba->devfreq); ufshcd_hba_exit(hba); } EXPORT_SYMBOL_GPL(ufshcd_remove); @@ -5228,6 +5285,112 @@ out_error: } EXPORT_SYMBOL(ufshcd_alloc_host); +static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) +{ + int ret = 0; + struct ufs_clk_info *clki; + struct list_head *head = &hba->clk_list_head; + + if (!head || list_empty(head)) + goto out; + + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk)) { + if (scale_up && clki->max_freq) { + if (clki->curr_freq == clki->max_freq) + continue; + ret = clk_set_rate(clki->clk, clki->max_freq); + if (ret) { + dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", + __func__, clki->name, + clki->max_freq, ret); + break; + } + clki->curr_freq = clki->max_freq; + + } else if (!scale_up && clki->min_freq) { + if (clki->curr_freq == clki->min_freq) + continue; + ret = clk_set_rate(clki->clk, clki->min_freq); + if (ret) { + dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", + __func__, clki->name, + clki->min_freq, ret); + break; + } + clki->curr_freq = clki->min_freq; + } + } + dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, + clki->name, clk_get_rate(clki->clk)); + } + if 
(hba->vops->clk_scale_notify) + hba->vops->clk_scale_notify(hba); +out: + return ret; +} + +static int ufshcd_devfreq_target(struct device *dev, + unsigned long *freq, u32 flags) +{ + int err = 0; + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (!ufshcd_is_clkscaling_enabled(hba)) + return -EINVAL; + + if (*freq == UINT_MAX) + err = ufshcd_scale_clks(hba, true); + else if (*freq == 0) + err = ufshcd_scale_clks(hba, false); + + return err; +} + +static int ufshcd_devfreq_get_dev_status(struct device *dev, + struct devfreq_dev_status *stat) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_clk_scaling *scaling = &hba->clk_scaling; + unsigned long flags; + + if (!ufshcd_is_clkscaling_enabled(hba)) + return -EINVAL; + + memset(stat, 0, sizeof(*stat)); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (!scaling->window_start_t) + goto start_window; + + if (scaling->is_busy_started) + scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), + scaling->busy_start_t)); + + stat->total_time = jiffies_to_usecs((long)jiffies - + (long)scaling->window_start_t); + stat->busy_time = scaling->tot_busy_t; +start_window: + scaling->window_start_t = jiffies; + scaling->tot_busy_t = 0; + + if (hba->outstanding_reqs) { + scaling->busy_start_t = ktime_get(); + scaling->is_busy_started = true; + } else { + scaling->busy_start_t = ktime_set(0, 0); + scaling->is_busy_started = false; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + return 0; +} + +static struct devfreq_dev_profile ufs_devfreq_profile = { + .polling_ms = 100, + .target = ufshcd_devfreq_target, + .get_dev_status = ufshcd_devfreq_get_dev_status, +}; + /** * ufshcd_init - Driver initialization routine * @hba: per-adapter instance @@ -5337,6 +5500,19 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) goto out_remove_scsi_host; } + if (ufshcd_is_clkscaling_enabled(hba)) { + hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile, + "simple_ondemand", NULL); + if (IS_ERR(hba->devfreq)) { + dev_err(hba->dev, "Unable to register with devfreq %ld\n", + PTR_ERR(hba->devfreq)); + goto out_remove_scsi_host; + } + /* Suspend devfreq until the UFS device is detected */ + devfreq_suspend_device(hba->devfreq); + hba->clk_scaling.window_start_t = 0; + } + /* Hold auto suspend until async scan completes */ pm_runtime_get_sync(dev); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index ac72a6daf50e..908db3eb0609 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -210,6 +210,7 @@ struct ufs_dev_cmd { * @name: clock name * @max_freq: maximum frequency supported by the clock * @min_freq: min frequency that can be used for clock scaling + * @curr_freq: indicates the current frequency that it is set to * @enabled: variable to check against multiple enable/disable */ struct ufs_clk_info { @@ -218,6 +219,7 @@ struct ufs_clk_info { const char *name; u32 max_freq; u32 min_freq; + u32 curr_freq; bool enabled; }; @@ -244,6 +246,7 @@ struct ufs_pwr_mode_info { * @name: variant name * @init: called when the driver is initialized * @exit: called to cleanup everything done in init + * @clk_scale_notify: notifies that clks are scaled up/down * @setup_clocks: called before touching any of the controller registers * @setup_regulators: called before accessing the host controller * @hce_enable_notify: called before and after HCE enable bit is set to allow @@ -260,6 +263,7 @@ struct ufs_hba_variant_ops { const char *name; int (*init)(struct ufs_hba *); void (*exit)(struct 
ufs_hba *); + void (*clk_scale_notify)(struct ufs_hba *); int (*setup_clocks)(struct ufs_hba *, bool); int (*setup_regulators)(struct ufs_hba *, bool); int (*hce_enable_notify)(struct ufs_hba *, bool); @@ -303,6 +307,13 @@ struct ufs_clk_gating { int active_reqs; }; +struct ufs_clk_scaling { + ktime_t busy_start_t; + bool is_busy_started; + unsigned long tot_busy_t; + unsigned long window_start_t; +}; + /** * struct ufs_init_prefetch - contains data that is pre-fetched once during * initialization @@ -456,6 +467,11 @@ struct ufs_hba { #define UFSHCD_CAP_CLK_GATING (1 << 0) /* Allow hiberb8 with clk gating */ #define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1) + /* Allow dynamic clk scaling */ +#define UFSHCD_CAP_CLK_SCALING (1 << 2) + + struct devfreq *devfreq; + struct ufs_clk_scaling clk_scaling; }; /* Returns true if clocks can be gated. Otherwise false */ @@ -467,6 +483,10 @@ static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba) { return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; } +static inline int ufshcd_is_clkscaling_enabled(struct ufs_hba *hba) +{ + return hba->caps & UFSHCD_CAP_CLK_SCALING; +} #define ufshcd_writel(hba, val, reg) \ writel((val), (hba)->mmio_base + (reg)) #define ufshcd_readl(hba, reg) \ -- cgit From 374a246e4ebda1fc55d537877bf2412e511ecc7b Mon Sep 17 00:00:00 2001 From: Subhash Jadavani Date: Thu, 25 Sep 2014 15:32:35 +0300 Subject: ufs: tune bkops while power managment events Add capability to control the auto bkops during suspend. If host explicitly enables the auto bkops (background operation) on device then only device would perform the bkops on its own. If auto bkops is not enabled explicitly and if the device reaches to state where it must do background operation, device would raise the urgent bkops exception event to host and then host will enable the auto bkops on device. This patch adds the option to choose whether auto bkops should be enabled during runtime suspend or not. Since we don't want to keep the device active to perform the non critical bkops, host will enable urgent bkops only. Keep auto-bkops enabled after resume if urgent bkops needed. If device bkops status shows that its in critical need of executing background operations, host should allow the device to continue doing background operations. Signed-off-by: Subhash Jadavani Signed-off-by: Dolev Raviv Signed-off-by: Christoph Hellwig --- drivers/scsi/ufs/ufshcd.c | 26 ++++++++++++++++++-------- drivers/scsi/ufs/ufshcd.h | 7 +++++++ 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index df525e3b2c19..3a2de56fe080 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -4884,13 +4884,19 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) } if (ufshcd_is_runtime_pm(pm_op)) { - /* - * The device is idle with no requests in the queue, - * allow background operations if needed. - */ - ret = ufshcd_bkops_ctrl(hba, BKOPS_STATUS_NON_CRITICAL); - if (ret) - goto enable_gating; + if (ufshcd_can_autobkops_during_suspend(hba)) { + /* + * The device is idle with no requests in the queue, + * allow background operations if bkops status shows + * that performance might be impacted. 
+ */ + ret = ufshcd_urgent_bkops(hba); + if (ret) + goto enable_gating; + } else { + /* make sure that auto bkops is disabled */ + ufshcd_disable_auto_bkops(hba); + } } if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) && @@ -5038,7 +5044,11 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) goto set_old_link_state; } - ufshcd_disable_auto_bkops(hba); + /* + * If BKOPs operations are urgently needed at this moment then + * keep auto-bkops enabled or else disable it. + */ + ufshcd_urgent_bkops(hba); hba->clk_gating.is_suspended = false; if (ufshcd_is_clkscaling_enabled(hba)) diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 908db3eb0609..d7fec869d590 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -469,6 +469,8 @@ struct ufs_hba { #define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1) /* Allow dynamic clk scaling */ #define UFSHCD_CAP_CLK_SCALING (1 << 2) + /* Allow auto bkops to enabled during runtime suspend */ +#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3) struct devfreq *devfreq; struct ufs_clk_scaling clk_scaling; @@ -487,6 +489,11 @@ static inline int ufshcd_is_clkscaling_enabled(struct ufs_hba *hba) { return hba->caps & UFSHCD_CAP_CLK_SCALING; } +static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba) +{ + return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND; +} + #define ufshcd_writel(hba, val, reg) \ writel((val), (hba)->mmio_base + (reg)) #define ufshcd_readl(hba, reg) \ -- cgit From e785060ea3a1c8e37a8bc1449c79e36bff2b5b13 Mon Sep 17 00:00:00 2001 From: Dolev Raviv Date: Thu, 25 Sep 2014 15:32:36 +0300 Subject: ufs: definitions for phy interface - Adding some of the definitions missing in unipro.h, including power enumeration. - Read Modify Write Line helper function - Indication for the type of suspend Signed-off-by: Dolev Raviv Signed-off-by: Subhash Jadavani Signed-off-by: Yaniv Gardi Signed-off-by: Christoph Hellwig --- drivers/scsi/ufs/ufshcd.c | 2 ++ drivers/scsi/ufs/ufshcd.h | 18 +++++++++++++++ drivers/scsi/ufs/unipro.h | 56 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 76 insertions(+) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 3a2de56fe080..497c38a4a866 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -5114,6 +5114,8 @@ int ufshcd_system_suspend(struct ufs_hba *hba) ret = ufshcd_suspend(hba, UFS_SYSTEM_PM); out: + if (!ret) + hba->is_sys_suspended = true; return ret; } EXPORT_SYMBOL(ufshcd_system_suspend); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index d7fec869d590..58ecdff5065c 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -474,6 +474,7 @@ struct ufs_hba { struct devfreq *devfreq; struct ufs_clk_scaling clk_scaling; + bool is_sys_suspended; }; /* Returns true if clocks can be gated. 
Otherwise false */ @@ -499,6 +500,23 @@ static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba) #define ufshcd_readl(hba, reg) \ readl((hba)->mmio_base + (reg)) +/** + * ufshcd_rmwl - read modify write into a register + * @hba - per adapter instance + * @mask - mask to apply on read value + * @val - actual value to write + * @reg - register address + */ +static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg) +{ + u32 tmp; + + tmp = ufshcd_readl(hba, reg); + tmp &= ~mask; + tmp |= (val & mask); + ufshcd_writel(hba, tmp, reg); +} + int ufshcd_alloc_host(struct device *, struct ufs_hba **); int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int); void ufshcd_remove(struct ufs_hba *); diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h index 0bb8041c047a..3fc3e21b746b 100644 --- a/drivers/scsi/ufs/unipro.h +++ b/drivers/scsi/ufs/unipro.h @@ -12,6 +12,44 @@ #ifndef _UNIPRO_H_ #define _UNIPRO_H_ +/* + * M-TX Configuration Attributes + */ +#define TX_MODE 0x0021 +#define TX_HSRATE_SERIES 0x0022 +#define TX_HSGEAR 0x0023 +#define TX_PWMGEAR 0x0024 +#define TX_AMPLITUDE 0x0025 +#define TX_HS_SLEWRATE 0x0026 +#define TX_SYNC_SOURCE 0x0027 +#define TX_HS_SYNC_LENGTH 0x0028 +#define TX_HS_PREPARE_LENGTH 0x0029 +#define TX_LS_PREPARE_LENGTH 0x002A +#define TX_HIBERN8_CONTROL 0x002B +#define TX_LCC_ENABLE 0x002C +#define TX_PWM_BURST_CLOSURE_EXTENSION 0x002D +#define TX_BYPASS_8B10B_ENABLE 0x002E +#define TX_DRIVER_POLARITY 0x002F +#define TX_HS_UNTERMINATED_LINE_DRIVE_ENABLE 0x0030 +#define TX_LS_TERMINATED_LINE_DRIVE_ENABLE 0x0031 +#define TX_LCC_SEQUENCER 0x0032 +#define TX_MIN_ACTIVATETIME 0x0033 +#define TX_PWM_G6_G7_SYNC_LENGTH 0x0034 + +/* + * M-RX Configuration Attributes + */ +#define RX_MODE 0x00A1 +#define RX_HSRATE_SERIES 0x00A2 +#define RX_HSGEAR 0x00A3 +#define RX_PWMGEAR 0x00A4 +#define RX_LS_TERMINATED_ENABLE 0x00A5 +#define RX_HS_UNTERMINATED_ENABLE 0x00A6 +#define RX_ENTER_HIBERN8 0x00A7 +#define RX_BYPASS_8B10B_ENABLE 0x00A8 +#define RX_TERMINATION_FORCE_ENABLE 0x0089 + +#define is_mphy_tx_attr(attr) (attr < RX_MODE) /* * PHY Adpater attributes */ @@ -87,6 +125,24 @@ enum { PA_HS_MODE_B = 2, }; +enum ufs_pwm_gear_tag { + UFS_PWM_DONT_CHANGE, /* Don't change Gear */ + UFS_PWM_G1, /* PWM Gear 1 (default for reset) */ + UFS_PWM_G2, /* PWM Gear 2 */ + UFS_PWM_G3, /* PWM Gear 3 */ + UFS_PWM_G4, /* PWM Gear 4 */ + UFS_PWM_G5, /* PWM Gear 5 */ + UFS_PWM_G6, /* PWM Gear 6 */ + UFS_PWM_G7, /* PWM Gear 7 */ +}; + +enum ufs_hs_gear_tag { + UFS_HS_DONT_CHANGE, /* Don't change Gear */ + UFS_HS_G1, /* HS Gear 1 (default for reset) */ + UFS_HS_G2, /* HS Gear 2 */ + UFS_HS_G3, /* HS Gear 3 */ +}; + /* * Data Link Layer Attributes */ -- cgit From 24c20f10583647e30afe87b6f6d5e14bc7b1cbc6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 30 Sep 2014 16:43:46 +0200 Subject: scsi: add a CONFIG_SCSI_MQ_DEFAULT option Add a Kconfig option to enable the blk-mq path for SCSI by default to ease testing and deployment in setups that know they benefit from blk-mq. Signed-off-by: Christoph Hellwig Reviewed-by: Martin K. 
Petersen Reviewed-by: Robert Elliott Tested-by: Robert Elliott --- drivers/scsi/Kconfig | 11 +++++++++++ drivers/scsi/scsi.c | 4 ++++ 2 files changed, 15 insertions(+) diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 18a3358eb1d4..71b0877245cb 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -45,6 +45,17 @@ config SCSI_NETLINK default n select NET +config SCSI_MQ_DEFAULT + bool "SCSI: use blk-mq I/O path by default" + depends on SCSI + ---help--- + This option enables the new blk-mq based I/O path for SCSI + devices by default. With the option the scsi_mod.use_blk_mq + module/boot option defaults to Y, without it to N, but it can + still be overriden either way. + + If unsure say N. + config SCSI_PROC_FS bool "legacy /proc/scsi/ support" depends on SCSI && PROC_FS diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 1423cb17fbfd..79c77b485a67 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -1367,7 +1367,11 @@ MODULE_LICENSE("GPL"); module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); +#ifdef CONFIG_SCSI_MQ_DEFAULT +bool scsi_use_blk_mq = true; +#else bool scsi_use_blk_mq = false; +#endif module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO); static int __init init_scsi(void) -- cgit
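
For reference, the commit's help text above notes that CONFIG_SCSI_MQ_DEFAULT only changes the default value of scsi_mod.use_blk_mq; the parameter shown in the diff can still select the I/O path explicitly either way. A minimal usage sketch, assuming the parameter is exposed in the usual module_param locations (command-line form for a built-in scsi_mod, modprobe form for a modular build):

    scsi_mod.use_blk_mq=1                              # kernel command line, built-in scsi_mod
    modprobe scsi_mod use_blk_mq=1                     # modular scsi_mod
    cat /sys/module/scsi_mod/parameters/use_blk_mq    # read back the current setting

With the Kconfig option enabled the read-back defaults to Y, without it to N; an explicit setting as above takes precedence in both cases.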