Diffstat (limited to 'drivers/scsi/hpsa.c')
-rw-r--r--  drivers/scsi/hpsa.c  |  38 ++++++++++++++------------------------
1 file changed, 14 insertions(+), 24 deletions(-)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 8df70c92911d..f4d3747cfa0b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3881,8 +3881,6 @@ static unsigned char hpsa_volume_offline(struct ctlr_info *h,
u8 sense_key, asc, ascq;
int sense_len;
int rc, ldstat = 0;
- u16 cmd_status;
- u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
@@ -3902,8 +3900,6 @@ static unsigned char hpsa_volume_offline(struct ctlr_info *h,
else
sense_len = c->err_info->SenseLen;
decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
- cmd_status = c->err_info->CommandStatus;
- scsi_status = c->err_info->ScsiStatus;
cmd_free(h, c);
/* Determine the reason for not ready state */
@@ -4351,7 +4347,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
u32 ndev_allocated = 0;
struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
int ncurrent = 0;
- int i, n_ext_target_devs, ndevs_to_allocate;
+ int i, ndevs_to_allocate;
int raid_ctlr_position;
bool physical_device;
DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
@@ -4416,7 +4412,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
raid_ctlr_position = nphysicals + nlogicals;
/* adjust our table of devices */
- n_ext_target_devs = 0;
for (i = 0; i < nphysicals + nlogicals + 1; i++) {
u8 *lunaddrbytes, is_OBDR = 0;
int rc = 0;
@@ -4580,7 +4575,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
struct scsi_cmnd *cmd)
{
struct scatterlist *sg;
- int use_sg, i, sg_limit, chained, last_sg;
+ int use_sg, i, sg_limit, chained;
struct SGDescriptor *curr_sg;
BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
@@ -4602,7 +4597,6 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
curr_sg = cp->SG;
chained = use_sg > h->max_cmd_sg_entries;
sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
- last_sg = scsi_sg_count(cmd) - 1;
scsi_for_each_sg(cmd, sg, sg_limit, i) {
hpsa_set_sg_descriptor(curr_sg, sg);
curr_sg++;
@@ -7442,7 +7436,6 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
dev_warn(&pdev->dev,
"base address is invalid\n");
return -1;
- break;
}
}
if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
@@ -8636,7 +8629,7 @@ static struct ctlr_info *hpda_alloc_ctlr_info(void)
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int dac, rc;
+ int rc;
struct ctlr_info *h;
int try_soft_reset = 0;
unsigned long flags;
@@ -8712,13 +8705,9 @@ reinit_after_soft_reset:
/* configure PCI DMA stuff */
rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (rc == 0) {
- dac = 1;
- } else {
+ if (rc != 0) {
rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc == 0) {
- dac = 0;
- } else {
+ if (rc != 0) {
dev_err(&pdev->dev, "no suitable DMA available\n");
goto clean3; /* shost, pci, lu, aer/h */
}
@@ -9092,25 +9081,27 @@ static void hpsa_remove_one(struct pci_dev *pdev)
hpda_free_ctlr_info(h); /* init_one 1 */
}
-static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
- __attribute__((unused)) pm_message_t state)
+static int __maybe_unused hpsa_suspend(
+ __attribute__((unused)) struct device *dev)
{
return -ENOSYS;
}
-static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
+static int __maybe_unused hpsa_resume
+ (__attribute__((unused)) struct device *dev)
{
return -ENOSYS;
}
+static SIMPLE_DEV_PM_OPS(hpsa_pm_ops, hpsa_suspend, hpsa_resume);
+
static struct pci_driver hpsa_pci_driver = {
.name = HPSA,
.probe = hpsa_init_one,
.remove = hpsa_remove_one,
.id_table = hpsa_pci_device_id, /* id_table */
.shutdown = hpsa_shutdown,
- .suspend = hpsa_suspend,
- .resume = hpsa_resume,
+ .driver.pm = &hpsa_pm_ops,
};
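As context for the hunk above, a generic sketch (not hpsa code; every name below is made up for illustration) of the pattern it adopts: the callbacks take a struct device *, are bundled into a struct dev_pm_ops by SIMPLE_DEV_PM_OPS(), and are wired up through pci_driver.driver.pm instead of the legacy .suspend/.resume pci_driver members.

#include <linux/pci.h>
#include <linux/pm.h>

/*
 * __maybe_unused keeps -Wunused-function quiet when CONFIG_PM_SLEEP=n,
 * since SET_SYSTEM_SLEEP_PM_OPS() (used inside SIMPLE_DEV_PM_OPS)
 * then drops the function pointers.
 */
static int __maybe_unused example_suspend(struct device *dev)
{
        /* device quiesce would go here */
        return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
        /* device re-initialization would go here */
        return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_pci_driver = {
        .name           = "example",
        /* .probe, .remove, .id_table, .shutdown as usual */
        .driver.pm      = &example_pm_ops,
};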
/* Fill in bucket_map[], given nsgs (the max number of
@@ -9299,10 +9290,9 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
} else if (trans_support & CFGTBL_Trans_io_accel2) {
u64 cfg_offset, cfg_base_addr_index;
u32 bft2_offset, cfg_base_addr;
- int rc;
- rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
- &cfg_base_addr_index, &cfg_offset);
+ hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
+ &cfg_base_addr_index, &cfg_offset);
BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,