Diffstat (limited to 'drivers/scsi/scsi_scan.c')
-rw-r--r--   drivers/scsi/scsi_scan.c   521
1 file changed, 374 insertions, 147 deletions
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index dd0d516f65e2..7acbfcfc2172 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -35,7 +35,7 @@
#include <linux/spinlock.h>
#include <linux/async.h>
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -97,7 +97,7 @@ MODULE_PARM_DESC(max_luns,
#define SCSI_SCAN_TYPE_DEFAULT "sync"
#endif
-char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
+static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
S_IRUGO|S_IWUSR);
@@ -122,6 +122,22 @@ struct async_scan_data {
struct completion prev_finished;
};
+/*
+ * scsi_enable_async_suspend - Enable async suspend and resume
+ */
+void scsi_enable_async_suspend(struct device *dev)
+{
+ /*
+ * If a user has disabled async probing a likely reason is due to a
+ * storage enclosure that does not inject staggered spin-ups. For
+ * safety, make resume synchronous as well in that case.
+ */
+ if (strncmp(scsi_scan_type, "async", 5) != 0)
+ return;
+ /* Enable asynchronous suspend and resume. */
+ device_enable_async_suspend(dev);
+}
+
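A minimal usage sketch, not part of the patch (the sdev-side call below is an assumption; within this diff the helper is only applied to the target device in scsi_alloc_target()):

	/* Opt a newly initialized SCSI device into async suspend/resume. */
	scsi_enable_async_suspend(&sdev->sdev_gendev);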
/**
* scsi_complete_async_scans - Wait for asynchronous scans to complete
*
@@ -135,8 +151,9 @@ int scsi_complete_async_scans(void)
struct async_scan_data *data;
do {
- if (list_empty(&scanning_hosts))
- return 0;
+ scoped_guard(spinlock, &async_scan_lock)
+ if (list_empty(&scanning_hosts))
+ return 0;
/* If we can't get memory immediately, that's OK. Just
* sleep a little. Even if we never get memory, the async
* scans will finish eventually.
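For reference, scoped_guard() comes from <linux/cleanup.h> and holds the lock for the duration of the guarded statement, dropping it on every exit path including the early return. A rough open-coded equivalent of the check above, for illustration only:

	bool empty;

	spin_lock(&async_scan_lock);
	empty = list_empty(&scanning_hosts);
	spin_unlock(&async_scan_lock);
	if (empty)
		return 0;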
@@ -194,15 +211,63 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
scsi_cmd[3] = 0;
scsi_cmd[4] = 0x2a; /* size */
scsi_cmd[5] = 0;
- scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
+ scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, result, 0x2a,
SCSI_TIMEOUT, 3, NULL);
}
+static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
+ unsigned int depth)
+{
+ int new_shift = sbitmap_calculate_shift(depth);
+ bool need_alloc = !sdev->budget_map.map;
+ bool need_free = false;
+ unsigned int memflags;
+ int ret;
+ struct sbitmap sb_backup;
+
+ depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
+
+ /*
+ * realloc if new shift is calculated, which is caused by setting
+ * up one new default queue depth after calling ->sdev_configure
+ */
+ if (!need_alloc && new_shift != sdev->budget_map.shift)
+ need_alloc = need_free = true;
+
+ if (!need_alloc)
+ return 0;
+
+ /*
+ * Request queue has to be frozen for reallocating budget map,
+ * and here disk isn't added yet, so freezing is pretty fast
+ */
+ if (need_free) {
+ memflags = blk_mq_freeze_queue(sdev->request_queue);
+ sb_backup = sdev->budget_map;
+ }
+ ret = sbitmap_init_node(&sdev->budget_map,
+ scsi_device_max_queue_depth(sdev),
+ new_shift, GFP_NOIO,
+ sdev->request_queue->node, false, true);
+ if (!ret)
+ sbitmap_resize(&sdev->budget_map, depth);
+
+ if (need_free) {
+ if (ret)
+ sdev->budget_map = sb_backup;
+ else
+ sbitmap_free(&sb_backup);
+ ret = 0;
+ blk_mq_unfreeze_queue(sdev->request_queue, memflags);
+ }
+ return ret;
+}
+
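In outline, the helper above is used twice later in this patch; both call sites are sketched here with their error handling abbreviated:

	/* 1) scsi_alloc_sdev(): size the map from the host's default depth. */
	depth = sdev->host->cmd_per_lun ?: 1;
	if (scsi_realloc_sdev_budget_map(sdev, depth))
		goto out;	/* tear the half-built sdev down */

	/* 2) scsi_add_lun(): ->sdev_configure() may have changed the depth. */
	if (hostt->sdev_configure)
		scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);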
/**
* scsi_alloc_sdev - allocate and setup a scsi_Device
* @starget: which target to allocate a &scsi_device for
* @lun: which lun
- * @hostdata: usually NULL and set by ->slave_alloc instead
+ * @hostdata: usually NULL and set by ->sdev_init instead
*
* Description:
* Allocate, initialize for io, and return a pointer to a scsi_Device.
@@ -215,12 +280,15 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
u64 lun, void *hostdata)
{
+ unsigned int depth;
struct scsi_device *sdev;
+ struct request_queue *q;
int display_failure_msg = 1, ret;
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct queue_limits lim;
sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!sdev)
goto out;
@@ -236,7 +304,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->sdev_state = SDEV_CREATED;
INIT_LIST_HEAD(&sdev->siblings);
INIT_LIST_HEAD(&sdev->same_target_siblings);
- INIT_LIST_HEAD(&sdev->cmd_list);
INIT_LIST_HEAD(&sdev->starved_entry);
INIT_LIST_HEAD(&sdev->event_list);
spin_lock_init(&sdev->list_lock);
@@ -247,11 +314,11 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->sdev_gendev.parent = get_device(&starget->dev);
sdev->sdev_target = starget;
- /* usually NULL and set by ->slave_alloc instead */
+ /* usually NULL and set by ->sdev_init instead */
sdev->hostdata = hostdata;
/* if the device needs this changing, it may do so in the
- * slave_configure function */
+ * sdev_configure function */
sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
/*
@@ -266,24 +333,43 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
*/
sdev->borken = 1;
- sdev->request_queue = scsi_mq_alloc_queue(sdev);
- if (!sdev->request_queue) {
+ sdev->sg_reserved_size = INT_MAX;
+
+ scsi_init_limits(shost, &lim);
+ q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, sdev);
+ if (IS_ERR(q)) {
/* release fn is set up in scsi_sysfs_device_initialise, so
* have to free and put manually here */
put_device(&starget->dev);
kfree(sdev);
goto out;
}
- WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
- sdev->request_queue->queuedata = sdev;
-
- scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun ?
- sdev->host->cmd_per_lun : 1);
+ kref_get(&sdev->host->tagset_refcnt);
+ sdev->request_queue = q;
scsi_sysfs_device_initialize(sdev);
- if (shost->hostt->slave_alloc) {
- ret = shost->hostt->slave_alloc(sdev);
+ if (scsi_device_is_pseudo_dev(sdev))
+ return sdev;
+
+ depth = sdev->host->cmd_per_lun ?: 1;
+
+ /*
+ * Use .can_queue as budget map's depth because we have to
+ * support adjusting queue depth from sysfs. Meantime use
+ * default device queue depth to figure out sbitmap shift
+ * since we use this queue depth most of times.
+ */
+ if (scsi_realloc_sdev_budget_map(sdev, depth)) {
+ put_device(&starget->dev);
+ kfree(sdev);
+ goto out;
+ }
+
+ scsi_change_queue_depth(sdev, depth);
+
+ if (shost->hostt->sdev_init) {
+ ret = shost->hostt->sdev_init(sdev);
if (ret) {
/*
* if LLDD reports slave not present, don't clutter
@@ -331,7 +417,7 @@ static void scsi_target_dev_release(struct device *dev)
put_device(parent);
}
-static struct device_type scsi_target_type = {
+static const struct device_type scsi_target_type = {
.name = "scsi_target",
.release = scsi_target_dev_release,
};
@@ -431,6 +517,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
dev->bus = &scsi_bus_type;
dev->type = &scsi_target_type;
+ scsi_enable_async_suspend(dev);
starget->id = id;
starget->channel = channel;
starget->can_queue = 0;
@@ -454,7 +541,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
error = shost->hostt->target_alloc(starget);
if(error) {
- dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
+ if (error != -ENXIO)
+ dev_err(dev, "target allocation failed, error %d\n", error);
/* don't want scsi_target_reap to do the final
* put because it will be under the host lock */
scsi_target_destroy(starget);
@@ -543,6 +631,7 @@ void scsi_sanitize_inquiry_string(unsigned char *s, int len)
}
EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
+
/**
* scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
* @sdev: scsi_device to probe
@@ -563,8 +652,38 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
int first_inquiry_len, try_inquiry_len, next_inquiry_len;
int response_len = 0;
- int pass, count, result;
- struct scsi_sense_hdr sshdr;
+ int pass, count, result, resid;
+ struct scsi_failure failure_defs[] = {
+ /*
+ * not-ready to ready transition [asc/ascq=0x28/0x0] or
+ * power-on, reset [asc/ascq=0x29/0x0], continue. INQUIRY
+ * should not yield UNIT_ATTENTION but many buggy devices do
+ * so anyway.
+ */
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x28,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .allowed = 1,
+ .result = DID_TIME_OUT << 16,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .total_allowed = 3,
+ .failure_definitions = failure_defs,
+ };
+ const struct scsi_exec_args exec_args = {
+ .resid = &resid,
+ .failures = &failures,
+ };
*bflags = 0;
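The scsi_failures/scsi_exec_args machinery declared above replaces the open-coded sense-checking retry loops removed below. A self-contained sketch of the same pattern; example_tur() and the use of TEST UNIT READY are illustrative only, not part of this patch:

	static int example_tur(struct scsi_device *sdev)
	{
		unsigned char cmd[6] = { TEST_UNIT_READY };
		struct scsi_failure failure_defs[] = {
			/* Retry any UNIT ATTENTION, up to total_allowed times. */
			{
				.sense = UNIT_ATTENTION,
				.asc = SCMD_FAILURE_ASC_ANY,
				.ascq = SCMD_FAILURE_ASCQ_ANY,
				.result = SAM_STAT_CHECK_CONDITION,
			},
			{}
		};
		struct scsi_failures failures = {
			.total_allowed = 3,
			.failure_definitions = failure_defs,
		};
		const struct scsi_exec_args exec_args = {
			.failures = &failures,
		};

		return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
					10 * HZ, 3, &exec_args);
	}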
@@ -581,40 +700,25 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
pass, try_inquiry_len));
/* Each pass gets up to three chances to ignore Unit Attention */
- for (count = 0; count < 3; ++count) {
- int resid;
+ scsi_failures_reset_retries(&failures);
+ for (count = 0; count < 3; ++count) {
memset(scsi_cmd, 0, 6);
scsi_cmd[0] = INQUIRY;
scsi_cmd[4] = (unsigned char) try_inquiry_len;
memset(inq_result, 0, try_inquiry_len);
- result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
- inq_result, try_inquiry_len, &sshdr,
+ result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
+ inq_result, try_inquiry_len,
HZ / 2 + HZ * scsi_inq_timeout, 3,
- &resid);
+ &exec_args);
SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
"scsi scan: INQUIRY %s with code 0x%x\n",
result ? "failed" : "successful", result));
- if (result) {
- /*
- * not-ready to ready transition [asc/ascq=0x28/0x0]
- * or power-on, reset [asc/ascq=0x29/0x0], continue.
- * INQUIRY should not yield UNIT_ATTENTION
- * but many buggy devices do so anyway.
- */
- if (driver_byte(result) == DRIVER_SENSE &&
- scsi_sense_valid(&sshdr)) {
- if ((sshdr.sense_key == UNIT_ATTENTION) &&
- ((sshdr.asc == 0x28) ||
- (sshdr.asc == 0x29)) &&
- (sshdr.ascq == 0))
- continue;
- }
- } else {
+ if (result == 0) {
/*
* if nothing was transferred, we try
* again. It's a workaround for some USB
@@ -650,7 +754,17 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (pass == 1) {
if (BLIST_INQUIRY_36 & *bflags)
next_inquiry_len = 36;
- else if (sdev->inquiry_len)
+ /*
+ * LLD specified a maximum sdev->inquiry_len
+ * but device claims it has more data. Capping
+ * the length only makes sense for legacy
+ * devices. If a device supports SPC-4 (2014)
+ * or newer, assume that it is safe to ask for
+ * as much as the device says it supports.
+ */
+ else if (sdev->inquiry_len &&
+ response_len > sdev->inquiry_len &&
+ (inq_result[2] & 0x7) < 6) /* SPC-4 */
next_inquiry_len = sdev->inquiry_len;
else
next_inquiry_len = response_len;
@@ -727,7 +841,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
* device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
* non-zero LUNs can be scanned.
*/
- sdev->scsi_level = inq_result[2] & 0x07;
+ sdev->scsi_level = inq_result[2] & 0x0f;
if (sdev->scsi_level >= 2 ||
(sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
sdev->scsi_level++;
@@ -764,6 +878,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
blist_flags_t *bflags, int async)
{
+ const struct scsi_host_template *hostt = sdev->host->hostt;
+ struct queue_limits lim;
int ret;
/*
@@ -788,7 +904,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
*/
sdev->inquiry = kmemdup(inq_result,
max_t(size_t, sdev->inquiry_len, 36),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (sdev->inquiry == NULL)
return SCSI_SCAN_NO_RESPONSE;
@@ -796,7 +912,8 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->model = (char *) (sdev->inquiry + 16);
sdev->rev = (char *) (sdev->inquiry + 32);
- if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
+ sdev->is_ata = strncmp(sdev->vendor, "ATA     ", 8) == 0;
+ if (sdev->is_ata) {
/*
* sata emulation layer device. This is a hack to work around
* the SATL power management specifications which state that
@@ -895,19 +1012,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->select_no_atn = 1;
/*
- * Maximum 512 sector transfer length
- * broken RA4x00 Compaq Disk Array
- */
- if (*bflags & BLIST_MAX_512)
- blk_queue_max_hw_sectors(sdev->request_queue, 512);
- /*
- * Max 1024 sector transfer length for targets that report incorrect
- * max/optimal lengths and relied on the old block layer safe default
- */
- else if (*bflags & BLIST_MAX_1024)
- blk_queue_max_hw_sectors(sdev->request_queue, 1024);
-
- /*
* Some devices may not want to have a start command automatically
* issued when a device is added.
*/
@@ -952,6 +1056,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (*bflags & BLIST_UNMAP_LIMIT_WS)
sdev->unmap_limit_for_ws = 1;
+ if (*bflags & BLIST_IGN_MEDIA_CHANGE)
+ sdev->ignore_media_change = 1;
+
sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
if (*bflags & BLIST_TRY_VPD_PAGES)
@@ -959,28 +1066,61 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
else if (*bflags & BLIST_SKIP_VPD_PAGES)
sdev->skip_vpd_pages = 1;
+ if (*bflags & BLIST_NO_VPD_SIZE)
+ sdev->no_vpd_size = 1;
+
transport_configure_device(&sdev->sdev_gendev);
- if (sdev->host->hostt->slave_configure) {
- ret = sdev->host->hostt->slave_configure(sdev);
- if (ret) {
- /*
- * if LLDD reports slave not present, don't clutter
- * console with alloc failure messages
- */
- if (ret != -ENXIO) {
- sdev_printk(KERN_ERR, sdev,
- "failed to configure device\n");
- }
- return SCSI_SCAN_NO_RESPONSE;
- }
+ sdev->sdev_bflags = *bflags;
+
+ if (scsi_device_is_pseudo_dev(sdev))
+ return SCSI_SCAN_LUN_PRESENT;
+
+ /*
+ * No need to freeze the queue as it isn't reachable by anyone else yet.
+ */
+ lim = queue_limits_start_update(sdev->request_queue);
+ if (*bflags & BLIST_MAX_512)
+ lim.max_hw_sectors = 512;
+ else if (*bflags & BLIST_MAX_1024)
+ lim.max_hw_sectors = 1024;
+
+ if (hostt->sdev_configure)
+ ret = hostt->sdev_configure(sdev, &lim);
+ else
+ ret = 0;
+ if (ret) {
+ queue_limits_cancel_update(sdev->request_queue);
+ /*
+ * If the LLDD reports device not present, don't clutter the
+ * console with failure messages.
+ */
+ if (ret != -ENXIO)
+ sdev_printk(KERN_ERR, sdev,
+ "failed to configure device\n");
+ return SCSI_SCAN_NO_RESPONSE;
+ }
+
+ ret = queue_limits_commit_update(sdev->request_queue, &lim);
+ if (ret) {
+ sdev_printk(KERN_ERR, sdev, "failed to apply queue limits.\n");
+ return SCSI_SCAN_NO_RESPONSE;
}
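The queue_limits calls above follow the block layer's atomic-update pattern: take a snapshot, adjust it, then commit it (or cancel it on error). A standalone sketch with a hypothetical helper that merely caps the transfer size; the 2048-sector value is illustrative:

	static int example_cap_transfer_size(struct scsi_device *sdev)
	{
		struct queue_limits lim;

		lim = queue_limits_start_update(sdev->request_queue);
		lim.max_hw_sectors = min_t(unsigned int, lim.max_hw_sectors, 2048);
		/* On failure the queue keeps its previous limits. */
		return queue_limits_commit_update(sdev->request_queue, &lim);
	}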
+ /*
+ * The queue_depth is often changed in ->sdev_configure.
+ *
+ * Set up budget map again since memory consumption of the map depends
+ * on actual queue depth.
+ */
+ if (hostt->sdev_configure)
+ scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
+
if (sdev->scsi_level >= SCSI_3)
scsi_attach_vpd(sdev);
+ scsi_cdl_check(sdev);
+
sdev->max_queue_depth = sdev->queue_depth;
- sdev->sdev_bflags = *bflags;
+ WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
/*
* Ok, the device is now all set up, we can
@@ -1079,8 +1219,13 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
if (!sdev)
goto out;
- result = kmalloc(result_len, GFP_ATOMIC |
- ((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
+ if (scsi_device_is_pseudo_dev(sdev)) {
+ if (bflagsp)
+ *bflagsp = BLIST_NOLUN;
+ return SCSI_SCAN_LUN_PRESENT;
+ }
+
+ result = kmalloc(result_len, GFP_KERNEL);
if (!result)
goto out_free_sdev;
@@ -1292,12 +1437,35 @@ static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflag
unsigned int length;
u64 lun;
unsigned int num_luns;
- unsigned int retries;
int result;
struct scsi_lun *lunp, *lun_data;
- struct scsi_sense_hdr sshdr;
struct scsi_device *sdev;
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Fail all CCs except the UA above */
+ {
+ .sense = SCMD_FAILURE_SENSE_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Retry any other errors not listed above */
+ {
+ .result = SCMD_FAILURE_RESULT_ANY,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .total_allowed = 3,
+ .failure_definitions = failure_defs,
+ };
+ const struct scsi_exec_args exec_args = {
+ .failures = &failures,
+ };
int ret = 0;
/*
@@ -1336,8 +1504,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflag
*/
length = (511 + 1) * sizeof(struct scsi_lun);
retry:
- lun_data = kmalloc(length, GFP_KERNEL |
- (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
+ lun_data = kmalloc(length, GFP_KERNEL);
if (!lun_data) {
printk(ALLOC_FAILURE_MSG, __func__);
goto out;
@@ -1368,28 +1535,18 @@ retry:
* should come through as a check condition, and will not generate
* a retry.
*/
- for (retries = 0; retries < 3; retries++) {
- SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
- "scsi scan: Sending REPORT LUNS to (try %d)\n",
- retries));
-
- result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
- lun_data, length, &sshdr,
- SCSI_REPORT_LUNS_TIMEOUT, 3, NULL);
-
- SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
- "scsi scan: REPORT LUNS"
- " %s (try %d) result 0x%x\n",
- result ? "failed" : "successful",
- retries, result));
- if (result == 0)
- break;
- else if (scsi_sense_valid(&sshdr)) {
- if (sshdr.sense_key != UNIT_ATTENTION)
- break;
- }
- }
+ scsi_failures_reset_retries(&failures);
+ SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
+ "scsi scan: Sending REPORT LUNS\n"));
+
+ result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, lun_data,
+ length, SCSI_REPORT_LUNS_TIMEOUT, 3,
+ &exec_args);
+
+ SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
+ "scsi scan: REPORT LUNS %s result 0x%x\n",
+ result ? "failed" : "successful", result));
if (result) {
/*
* The device probably does not support a REPORT LUN command
@@ -1476,7 +1633,8 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
scsi_complete_async_scans();
if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
- scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
+ scsi_probe_and_add_lun(starget, lun, NULL, &sdev,
+ SCSI_SCAN_RESCAN, hostdata);
scsi_autopm_put_host(shost);
}
mutex_unlock(&shost->scan_mutex);
@@ -1492,6 +1650,24 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
}
EXPORT_SYMBOL(__scsi_add_device);
+/**
+ * scsi_add_device - creates a new SCSI (LU) instance
+ * @host: the &Scsi_Host instance where the device is located
+ * @channel: target channel number (rarely other than %0)
+ * @target: target id number
+ * @lun: LUN of target device
+ *
+ * Probe for a specific LUN and add it if found.
+ *
+ * Notes: This call is usually performed internally during a SCSI
+ * bus scan when an HBA is added (i.e. scsi_scan_host()). So it
+ * should only be called if the HBA becomes aware of a new SCSI
+ * device (LU) after scsi_scan_host() has completed. If successful
+ * this call can lead to sdev_init() and sdev_configure() callbacks
+ * into the LLD.
+ *
+ * Return: %0 on success or negative error code on failure
+ */
int scsi_add_device(struct Scsi_Host *host, uint channel,
uint target, u64 lun)
{
@@ -1505,13 +1681,61 @@ int scsi_add_device(struct Scsi_Host *host, uint channel,
}
EXPORT_SYMBOL(scsi_add_device);
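A usage sketch matching the note above; the hotplug handler name and the message are illustrative, not taken from any in-tree LLD:

	static void example_lld_report_new_lu(struct Scsi_Host *shost,
					      uint channel, uint id, u64 lun)
	{
		/* Called after scsi_scan_host() when the HBA reports a new LU. */
		if (scsi_add_device(shost, channel, id, lun))
			shost_printk(KERN_WARNING, shost,
				     "failed to add device %u:%u:%llu\n",
				     channel, id, lun);
	}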
-void scsi_rescan_device(struct device *dev)
+int scsi_resume_device(struct scsi_device *sdev)
+{
+ struct device *dev = &sdev->sdev_gendev;
+ int ret = 0;
+
+ device_lock(dev);
+
+ /*
+ * Bail out if the device or its queue are not running. Otherwise,
+ * the rescan may block waiting for commands to be executed, with us
+ * holding the device lock. This can result in a potential deadlock
+ * in the power management core code when system resume is on-going.
+ */
+ if (sdev->sdev_state != SDEV_RUNNING ||
+ blk_queue_pm_only(sdev->request_queue)) {
+ ret = -EWOULDBLOCK;
+ goto unlock;
+ }
+
+ if (dev->driver && try_module_get(dev->driver->owner)) {
+ struct scsi_driver *drv = to_scsi_driver(dev->driver);
+
+ if (drv->resume)
+ ret = drv->resume(dev);
+ module_put(dev->driver->owner);
+ }
+
+unlock:
+ device_unlock(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(scsi_resume_device);
+
+int scsi_rescan_device(struct scsi_device *sdev)
{
- struct scsi_device *sdev = to_scsi_device(dev);
+ struct device *dev = &sdev->sdev_gendev;
+ int ret = 0;
device_lock(dev);
+ /*
+ * Bail out if the device or its queue are not running. Otherwise,
+ * the rescan may block waiting for commands to be executed, with us
+ * holding the device lock. This can result in a potential deadlock
+ * in the power management core code when system resume is on-going.
+ */
+ if (sdev->sdev_state != SDEV_RUNNING ||
+ blk_queue_pm_only(sdev->request_queue)) {
+ ret = -EWOULDBLOCK;
+ goto unlock;
+ }
+
scsi_attach_vpd(sdev);
+ scsi_cdl_check(sdev);
if (sdev->handler && sdev->handler->rescan)
sdev->handler->rescan(sdev);
@@ -1523,7 +1747,11 @@ void scsi_rescan_device(struct device *dev)
drv->rescan(dev);
module_put(dev->driver->owner);
}
+
+unlock:
device_unlock(dev);
+
+ return ret;
}
EXPORT_SYMBOL(scsi_rescan_device);
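Since scsi_rescan_device() now reports whether the rescan actually ran, a caller can tell a skipped rescan from a completed one. A caller sketch; how to react to a suspended device is up to the caller:

	if (scsi_rescan_device(sdev) == -EWOULDBLOCK)
		sdev_printk(KERN_INFO, sdev,
			    "rescan skipped: device suspended or not running\n");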
@@ -1685,7 +1913,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
return 0;
}
-
+EXPORT_SYMBOL(scsi_scan_host_selected);
static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
{
struct scsi_device *sdev;
@@ -1714,15 +1942,16 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
*/
static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
{
- struct async_scan_data *data;
+ struct async_scan_data *data = NULL;
unsigned long flags;
if (strncmp(scsi_scan_type, "sync", 4) == 0)
return NULL;
+ mutex_lock(&shost->scan_mutex);
if (shost->async_scan) {
shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
- return NULL;
+ goto err;
}
data = kmalloc(sizeof(*data), GFP_KERNEL);
@@ -1733,7 +1962,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
goto err;
init_completion(&data->prev_finished);
- mutex_lock(&shost->scan_mutex);
spin_lock_irqsave(shost->host_lock, flags);
shost->async_scan = 1;
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -1748,6 +1976,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
return data;
err:
+ mutex_unlock(&shost->scan_mutex);
kfree(data);
return NULL;
}
@@ -1814,7 +2043,7 @@ static void do_scsi_scan_host(struct Scsi_Host *shost)
msleep(10);
} else {
scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
- SCAN_WILD_CARD, 0);
+ SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
}
}
@@ -1830,6 +2059,8 @@ static void do_scan_async(void *_data, async_cookie_t c)
/**
* scsi_scan_host - scan the given adapter
* @shost: adapter to scan
+ *
+ * Notes: Should be called after scsi_add_host()
**/
void scsi_scan_host(struct Scsi_Host *shost)
{
@@ -1865,69 +2096,65 @@ void scsi_forget_host(struct Scsi_Host *shost)
restart:
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(sdev, &shost->__devices, siblings) {
- if (sdev->sdev_state == SDEV_DEL)
+ if (scsi_device_is_pseudo_dev(sdev) ||
+ sdev->sdev_state == SDEV_DEL)
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
__scsi_remove_device(sdev);
goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /*
+ * Remove the pseudo device last since it may be needed during removal
+ * of other SCSI devices.
+ */
+ if (shost->pseudo_sdev)
+ __scsi_remove_device(shost->pseudo_sdev);
}
/**
- * scsi_get_host_dev - Create a scsi_device that points to the host adapter itself
- * @shost: Host that needs a scsi_device
+ * scsi_get_pseudo_sdev() - Attach a pseudo SCSI device to a SCSI host
+ * @shost: Host that needs a pseudo SCSI device
*
* Lock status: None assumed.
*
* Returns: The scsi_device or NULL
*
* Notes:
- * Attach a single scsi_device to the Scsi_Host - this should
- * be made to look like a "pseudo-device" that points to the
- * HA itself.
- *
- * Note - this device is not accessible from any high-level
- * drivers (including generics), which is probably not
- * optimal. We can add hooks later to attach.
+ * Attach a single scsi_device to the Scsi_Host. The primary aim for this
+ * device is to serve as a container from which SCSI commands can be
+ * allocated. Each SCSI command will carry a command tag allocated by the
+ * block layer. These SCSI commands can be used by the LLDD to send
+ * internal or passthrough commands without having to manage tag allocation
+ * inside the LLDD.
*/
-struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
+struct scsi_device *scsi_get_pseudo_sdev(struct Scsi_Host *shost)
{
struct scsi_device *sdev = NULL;
struct scsi_target *starget;
- mutex_lock(&shost->scan_mutex);
+ guard(mutex)(&shost->scan_mutex);
+
if (!scsi_host_scan_allowed(shost))
goto out;
- starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id);
+
+ starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->max_id);
if (!starget)
goto out;
- sdev = scsi_alloc_sdev(starget, 0, NULL);
- if (sdev)
- sdev->borken = 0;
- else
+ sdev = scsi_alloc_sdev(starget, U64_MAX, NULL);
+ if (!sdev) {
scsi_target_reap(starget);
- put_device(&starget->dev);
- out:
- mutex_unlock(&shost->scan_mutex);
- return sdev;
-}
-EXPORT_SYMBOL(scsi_get_host_dev);
+ goto put_target;
+ }
-/**
- * scsi_free_host_dev - Free a scsi_device that points to the host adapter itself
- * @sdev: Host device to be freed
- *
- * Lock status: None assumed.
- *
- * Returns: Nothing
- */
-void scsi_free_host_dev(struct scsi_device *sdev)
-{
- BUG_ON(sdev->id != sdev->host->this_id);
+ sdev->borken = 0;
- __scsi_remove_device(sdev);
-}
-EXPORT_SYMBOL(scsi_free_host_dev);
+put_target:
+ /* See also the get_device(dev) call in scsi_alloc_target(). */
+ put_device(&starget->dev);
+out:
+ return sdev;
+}
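A setup sketch for the notes above (hypothetical LLD code; where the pointer is kept and how internal commands are later allocated against this device are outside the scope of this patch):

	struct scsi_device *internal_sdev;

	/* After scsi_add_host(): reserve a device for the LLD's internal commands. */
	internal_sdev = scsi_get_pseudo_sdev(shost);
	if (!internal_sdev)
		return -ENOMEM;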