Diffstat (limited to 'drivers/scsi/ibmvscsi/ibmvfc.c')
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c  62
1 file changed, 38 insertions(+), 24 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 05b126bfd18b..4c493b06062a 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -37,6 +37,7 @@ static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static u64 max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
+static u16 max_sectors = IBMVFC_MAX_SECTORS;
static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
@@ -83,6 +84,9 @@ MODULE_PARM_DESC(default_timeout,
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
"[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
+module_param_named(max_sectors, max_sectors, ushort, S_IRUGO);
+MODULE_PARM_DESC(max_sectors, "Maximum sectors for this adapter. "
+ "[Default=" __stringify(IBMVFC_MAX_SECTORS) "]");
module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO);
MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. "
"[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]");
@@ -1106,7 +1110,7 @@ static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
} else
evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
- del_timer(&evt->timer);
+ timer_delete(&evt->timer);
}
/**
@@ -1494,7 +1498,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
memset(login_info, 0, sizeof(*login_info));
login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
- login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
+ login_info->max_dma_len = cpu_to_be64(max_sectors << 9);
login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
login_info->partition_num = cpu_to_be32(vhost->partition_number);
@@ -1750,7 +1754,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
atomic_set(&evt->active, 0);
list_del(&evt->queue_list);
spin_unlock_irqrestore(&evt->queue->l_lock, flags);
- del_timer(&evt->timer);
+ timer_delete(&evt->timer);
/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
* Firmware will send a CRQ with a transport event (0xFF) to
@@ -3389,7 +3393,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
/**
- * ibmvfc_slave_alloc - Setup the device's task set value
+ * ibmvfc_sdev_init - Setup the device's task set value
* @sdev: struct scsi_device device to configure
*
* Set the device's task set value so that error handling works as
@@ -3398,7 +3402,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
* Returns:
* 0 on success / -ENXIO if device does not exist
**/
-static int ibmvfc_slave_alloc(struct scsi_device *sdev)
+static int ibmvfc_sdev_init(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -3437,8 +3441,9 @@ static int ibmvfc_target_alloc(struct scsi_target *starget)
}
/**
- * ibmvfc_slave_configure - Configure the device
+ * ibmvfc_sdev_configure - Configure the device
* @sdev: struct scsi_device device to configure
+ * @lim: Request queue limits
*
* Enable allow_restart for a device if it is a disk. Adjust the
* queue_depth here also.
@@ -3446,7 +3451,8 @@ static int ibmvfc_target_alloc(struct scsi_target *starget)
* Returns:
* 0
**/
-static int ibmvfc_slave_configure(struct scsi_device *sdev)
+static int ibmvfc_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct Scsi_Host *shost = sdev->host;
unsigned long flags = 0;
@@ -3635,7 +3641,7 @@ static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
* number of bytes printed to buffer
**/
static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -3658,13 +3664,13 @@ static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute ibmvfc_trace_attr = {
+static const struct bin_attribute ibmvfc_trace_attr = {
.attr = {
.name = "trace",
.mode = S_IRUGO,
},
.size = 0,
- .read = ibmvfc_read_trace,
+ .read_new = ibmvfc_read_trace,
};
#endif
@@ -3692,8 +3698,8 @@ static const struct scsi_host_template driver_template = {
.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
- .slave_alloc = ibmvfc_slave_alloc,
- .slave_configure = ibmvfc_slave_configure,
+ .sdev_init = ibmvfc_sdev_init,
+ .sdev_configure = ibmvfc_sdev_configure,
.target_alloc = ibmvfc_target_alloc,
.scan_finished = ibmvfc_scan_finished,
.change_queue_depth = ibmvfc_change_queue_depth,
@@ -3826,7 +3832,7 @@ static void ibmvfc_tasklet(void *data)
spin_unlock_irqrestore(vhost->host->host_lock, flags);
list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
- del_timer(&evt->timer);
+ timer_delete(&evt->timer);
list_del(&evt->queue_list);
ibmvfc_trc_end(evt);
evt->done(evt);
@@ -3932,7 +3938,7 @@ static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
spin_unlock_irqrestore(scrq->q_lock, flags);
list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
- del_timer(&evt->timer);
+ timer_delete(&evt->timer);
list_del(&evt->queue_list);
ibmvfc_trc_end(evt);
evt->done(evt);
@@ -4536,7 +4542,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
- del_timer(&tgt->timer);
+ timer_delete(&tgt->timer);
switch (status) {
case IBMVFC_MAD_SUCCESS:
@@ -4735,7 +4741,7 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
vhost->discovery_threads--;
- del_timer(&tgt->timer);
+ timer_delete(&tgt->timer);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
kref_put(&tgt->kref, ibmvfc_release_tgt);
} else
@@ -5230,7 +5236,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
}
vhost->logged_in = 1;
- npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
+ npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), max_sectors);
dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
rsp->partition_name, rsp->device_name, rsp->port_loc_code,
rsp->drc_name, npiv_max_sectors);
@@ -5513,7 +5519,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
fc_remote_port_delete(rport);
- del_timer_sync(&tgt->timer);
+ timer_delete_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
@@ -5541,8 +5547,6 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
rport->supported_classes |= FC_COS_CLASS2;
if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
rport->supported_classes |= FC_COS_CLASS3;
- if (rport->rqst_q)
- blk_queue_max_segments(rport->rqst_q, 1);
} else
tgt_dbg(tgt, "rport add failed\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -5668,7 +5672,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rport)
fc_remote_port_delete(rport);
- del_timer_sync(&tgt->timer);
+ timer_delete_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
@@ -6331,7 +6335,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
shost->can_queue = scsi_qdepth;
shost->max_lun = max_lun;
shost->max_id = max_targets;
- shost->max_sectors = IBMVFC_MAX_SECTORS;
+ shost->max_sectors = max_sectors;
shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
shost->unique_id = shost->host_no;
shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;
@@ -6391,8 +6395,6 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
ibmvfc_init_sub_crqs(vhost);
- if (shost_to_fc_host(shost)->rqst_q)
- blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
dev_set_drvdata(dev, vhost);
spin_lock(&ibmvfc_driver_lock);
list_add_tail(&vhost->queue, &ibmvfc_head);
@@ -6547,6 +6549,7 @@ static struct fc_function_template ibmvfc_transport_functions = {
.get_starget_port_id = ibmvfc_get_starget_port_id,
.show_starget_port_id = 1,
+ .max_bsg_segments = 1,
.bsg_request = ibmvfc_bsg_request,
.bsg_timeout = ibmvfc_bsg_timeout,
};
@@ -6559,6 +6562,7 @@ static struct fc_function_template ibmvfc_transport_functions = {
**/
static int __init ibmvfc_module_init(void)
{
+ int min_max_sectors = PAGE_SIZE >> 9;
int rc;
if (!firmware_has_feature(FW_FEATURE_VIO))
@@ -6567,6 +6571,16 @@ static int __init ibmvfc_module_init(void)
printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
+ /*
+ * Range check the max_sectors module parameter. The upper bound is
+ * implicitly checked since the parameter is a ushort.
+ */
+ if (max_sectors < min_max_sectors) {
+ printk(KERN_ERR IBMVFC_NAME ": max_sectors must be at least %d.\n",
+ min_max_sectors);
+ max_sectors = min_max_sectors;
+ }
+
ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
if (!ibmvfc_transport_template)
return -ENOMEM;