path: root/drivers/ata/libata-core.c
Diffstat (limited to 'drivers/ata/libata-core.c')
-rw-r--r--  drivers/ata/libata-core.c | 285
1 file changed, 224 insertions(+), 61 deletions(-)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 74314311295f..6fb4e8dc8c3c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1972,6 +1972,141 @@ retry:
return rc;
}
+bool ata_dev_power_init_tf(struct ata_device *dev, struct ata_taskfile *tf,
+ bool set_active)
+{
+ /* Only applies to ATA and ZAC devices */
+ if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
+ return false;
+
+ ata_tf_init(dev, tf);
+ tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf->protocol = ATA_PROT_NODATA;
+
+ if (set_active) {
+ /* VERIFY for 1 sector at lba=0 */
+ tf->command = ATA_CMD_VERIFY;
+ tf->nsect = 1;
+ if (dev->flags & ATA_DFLAG_LBA) {
+ tf->flags |= ATA_TFLAG_LBA;
+ tf->device |= ATA_LBA;
+ } else {
+ /* CHS */
+ tf->lbal = 0x1; /* sect */
+ }
+ } else {
+ tf->command = ATA_CMD_STANDBYNOW1;
+ }
+
+ return true;
+}
+
+/**
+ * ata_dev_power_set_standby - Set a device power mode to standby
+ * @dev: target device
+ *
+ * Issue a STANDBY IMMEDIATE command to set a device power mode to standby.
+ * For an HDD device, this spins down the disks.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_dev_power_set_standby(struct ata_device *dev)
+{
+ unsigned long ap_flags = dev->link->ap->flags;
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ /*
+ * Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5)
+ * causing some drives to spin up and down again. For these, do nothing
+ * if we are being called on shutdown.
+ */
+ if ((ap_flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
+ system_state == SYSTEM_POWER_OFF)
+ return;
+
+ if ((ap_flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
+ system_entering_hibernation())
+ return;
+
+ /* Issue STANDBY IMMEDIATE command only if supported by the device */
+ if (!ata_dev_power_init_tf(dev, &tf, false))
+ return;
+
+ ata_dev_notice(dev, "Entering standby power mode\n");
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (err_mask)
+ ata_dev_err(dev, "STANDBY IMMEDIATE failed (err_mask=0x%x)\n",
+ err_mask);
+}
+
+static bool ata_dev_power_is_active(struct ata_device *dev)
+{
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ ata_tf_init(dev, &tf);
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf.protocol = ATA_PROT_NODATA;
+ tf.command = ATA_CMD_CHK_POWER;
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (err_mask) {
+ ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n",
+ err_mask);
+ /*
+ * Assume we are in standby mode so that we always force a
+ * spinup in ata_dev_power_set_active().
+ */
+ return false;
+ }
+
+ ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect);
+
+ /* Active or idle */
+ return tf.nsect == 0xff;
+}
+
+/**
+ * ata_dev_power_set_active - Set a device power mode to active
+ * @dev: target device
+ *
+ * Issue a VERIFY command to ensure that the device is in the active
+ * power mode. For a spun-down HDD (standby or idle power mode), the
+ * VERIFY command will complete after the disk spins up.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_dev_power_set_active(struct ata_device *dev)
+{
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ /*
+ * Issue READ VERIFY SECTORS command for 1 sector at lba=0 only
+ * if supported by the device.
+ */
+ if (!ata_dev_power_init_tf(dev, &tf, true))
+ return;
+
+ /*
+ * Check the device power state & condition and force a spinup with
+ * VERIFY command only if the drive is not already ACTIVE or IDLE.
+ */
+ if (ata_dev_power_is_active(dev))
+ return;
+
+ ata_dev_notice(dev, "Entering active power mode\n");
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (err_mask)
+ ata_dev_err(dev, "VERIFY failed (err_mask=0x%x)\n",
+ err_mask);
+}
+
/**
* ata_read_log_page - read a specific log page
* @dev: target device
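
For readers skimming the diff, the sketch below is not part of this patch: it is a minimal illustration of how the two new helpers could be driven from port-level code. The function names are hypothetical, and the sketch assumes it is built inside libata (so the internal prototypes from drivers/ata/libata.h are visible); the ata_for_each_link()/ata_for_each_dev() iterators are the ones already used elsewhere in this file.

#include "libata.h"	/* internal libata header, as included by libata-core.c */

/*
 * Hypothetical illustration only: spin down every enabled device on a
 * port, and later force the devices back to the active power mode,
 * using the helpers introduced above. Neither example function exists
 * in libata; they are named here purely for the sketch.
 */
static void example_port_spin_down(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(dev, link, ENABLED)
			ata_dev_power_set_standby(dev);
	}
}

static void example_port_spin_up(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(dev, link, ENABLED)
			ata_dev_power_set_active(dev);
	}
}

Note that ata_dev_power_set_active() first issues CHECK POWER MODE via ata_dev_power_is_active(), so calling it on a drive that is already active or idle costs only that query and skips the VERIFY spinup.
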
@@ -2529,7 +2664,7 @@ static int ata_dev_config_lba(struct ata_device *dev)
{
const u16 *id = dev->id;
const char *lba_desc;
- char ncq_desc[24];
+ char ncq_desc[32];
int ret;
dev->flags |= ATA_DFLAG_LBA;
@@ -4783,11 +4918,8 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
* been aborted by the device due to a limit timeout using the policy
* 0xD. For these commands, invoke EH to get the command sense data.
*/
- if (qc->result_tf.status & ATA_SENSE &&
- ((ata_is_ncq(qc->tf.protocol) &&
- dev->flags & ATA_DFLAG_CDL_ENABLED) ||
- (!ata_is_ncq(qc->tf.protocol) &&
- ata_id_sense_reporting_enabled(dev->id)))) {
+ if (qc->flags & ATA_QCFLAG_HAS_CDL &&
+ qc->result_tf.status & ATA_SENSE) {
/*
* Tell SCSI EH to not overwrite scmd->result even if this
* command is finished with result SAM_STAT_GOOD.
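
Since the bitwise AND binds more tightly than the logical AND in C, the simplified test above is equivalent to the explicitly grouped form below, shown only to make the new condition easier to read:

	if ((qc->flags & ATA_QCFLAG_HAS_CDL) &&
	    (qc->result_tf.status & ATA_SENSE)) {
		/* invoke EH to retrieve the command sense data */
	}
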
@@ -5040,17 +5172,19 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
struct ata_link *link;
unsigned long flags;
- /* Previous resume operation might still be in
- * progress. Wait for PM_PENDING to clear.
+ spin_lock_irqsave(ap->lock, flags);
+
+ /*
+ * A previous PM operation might still be in progress. Wait for
+ * ATA_PFLAG_PM_PENDING to clear.
*/
if (ap->pflags & ATA_PFLAG_PM_PENDING) {
+ spin_unlock_irqrestore(ap->lock, flags);
ata_port_wait_eh(ap);
- WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+ spin_lock_irqsave(ap->lock, flags);
}
- /* request PM ops to EH */
- spin_lock_irqsave(ap->lock, flags);
-
+ /* Request PM operation to EH */
ap->pm_mesg = mesg;
ap->pflags |= ATA_PFLAG_PM_PENDING;
ata_for_each_link(link, ap, HOST_FIRST) {
@@ -5062,31 +5196,33 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
spin_unlock_irqrestore(ap->lock, flags);
- if (!async) {
+ if (!async)
ata_port_wait_eh(ap);
- WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
- }
}
-/*
- * On some hardware, device fails to respond after spun down for suspend. As
- * the device won't be used before being resumed, we don't need to touch the
- * device. Ask EH to skip the usual stuff and proceed directly to suspend.
- *
- * http://thread.gmane.org/gmane.linux.ide/46764
- */
-static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
- | ATA_EHI_NO_AUTOPSY
- | ATA_EHI_NO_RECOVERY;
-
-static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
+static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg,
+ bool async)
{
- ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
-}
+ /*
+ * We are about to suspend the port, so we do not care about
+ * scsi_rescan_device() calls scheduled by previous resume operations.
+ * The next resume will schedule the rescan again. So cancel any rescan
+ * that is not done yet.
+ */
+ cancel_delayed_work_sync(&ap->scsi_rescan_task);
-static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
-{
- ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
+ /*
+ * On some hardware, the device fails to respond after being spun down
+ * for suspend. As the device will not be used until being resumed, we
+ * do not need to touch the device. Ask EH to skip the usual stuff
+ * and proceed directly to suspend.
+ *
+ * http://thread.gmane.org/gmane.linux.ide/46764
+ */
+ ata_port_request_pm(ap, mesg, 0,
+ ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY |
+ ATA_EHI_NO_RECOVERY,
+ async);
}
static int ata_port_pm_suspend(struct device *dev)
@@ -5096,7 +5232,7 @@ static int ata_port_pm_suspend(struct device *dev)
if (pm_runtime_suspended(dev))
return 0;
- ata_port_suspend(ap, PMSG_SUSPEND);
+ ata_port_suspend(ap, PMSG_SUSPEND, false);
return 0;
}
@@ -5107,35 +5243,29 @@ static int ata_port_pm_freeze(struct device *dev)
if (pm_runtime_suspended(dev))
return 0;
- ata_port_suspend(ap, PMSG_FREEZE);
+ ata_port_suspend(ap, PMSG_FREEZE, false);
return 0;
}
static int ata_port_pm_poweroff(struct device *dev)
{
- ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
+ if (!pm_runtime_suspended(dev))
+ ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE, false);
return 0;
}
-static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
- | ATA_EHI_QUIET;
-
-static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
-{
- ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
-}
-
-static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
+static void ata_port_resume(struct ata_port *ap, pm_message_t mesg,
+ bool async)
{
- ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
+ ata_port_request_pm(ap, mesg, ATA_EH_RESET,
+ ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET,
+ async);
}
static int ata_port_pm_resume(struct device *dev)
{
- ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ if (!pm_runtime_suspended(dev))
+ ata_port_resume(to_ata_port(dev), PMSG_RESUME, true);
return 0;
}
@@ -5165,13 +5295,13 @@ static int ata_port_runtime_idle(struct device *dev)
static int ata_port_runtime_suspend(struct device *dev)
{
- ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
+ ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND, false);
return 0;
}
static int ata_port_runtime_resume(struct device *dev)
{
- ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
+ ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME, false);
return 0;
}
@@ -5195,13 +5325,13 @@ static const struct dev_pm_ops ata_port_pm_ops = {
*/
void ata_sas_port_suspend(struct ata_port *ap)
{
- ata_port_suspend_async(ap, PMSG_SUSPEND);
+ ata_port_suspend(ap, PMSG_SUSPEND, true);
}
EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
void ata_sas_port_resume(struct ata_port *ap)
{
- ata_port_resume_async(ap, PMSG_RESUME);
+ ata_port_resume(ap, PMSG_RESUME, true);
}
EXPORT_SYMBOL_GPL(ata_sas_port_resume);
@@ -5232,7 +5362,7 @@ EXPORT_SYMBOL_GPL(ata_host_resume);
#endif
const struct device_type ata_port_type = {
- .name = "ata_port",
+ .name = ATA_PORT_TYPE_NAME,
#ifdef CONFIG_PM
.pm = &ata_port_pm_ops,
#endif
@@ -5923,7 +6053,7 @@ int ata_host_activate(struct ata_host *host, int irq,
return rc;
for (i = 0; i < host->n_ports; i++)
- ata_port_desc(host->ports[i], "irq %d", irq);
+ ata_port_desc_misc(host->ports[i], irq);
rc = ata_host_register(host, sht);
/* if failed, just free the IRQ and leave ports alone */
@@ -5951,11 +6081,33 @@ static void ata_port_detach(struct ata_port *ap)
struct ata_link *link;
struct ata_device *dev;
- /* tell EH we're leaving & flush EH */
+ /* Ensure ata_port probe has completed */
+ async_synchronize_cookie(ap->cookie + 1);
+
+ /* Wait for any ongoing EH */
+ ata_port_wait_eh(ap);
+
+ mutex_lock(&ap->scsi_scan_mutex);
spin_lock_irqsave(ap->lock, flags);
+
+ /* Remove scsi devices */
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ ata_for_each_dev(dev, link, ALL) {
+ if (dev->sdev) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ scsi_remove_device(dev->sdev);
+ spin_lock_irqsave(ap->lock, flags);
+ dev->sdev = NULL;
+ }
+ }
+ }
+
+ /* Tell EH to disable all devices */
ap->pflags |= ATA_PFLAG_UNLOADING;
ata_port_schedule_eh(ap);
+
spin_unlock_irqrestore(ap->lock, flags);
+ mutex_unlock(&ap->scsi_scan_mutex);
/* wait till EH commits suicide */
ata_port_wait_eh(ap);
@@ -5996,11 +6148,8 @@ void ata_host_detach(struct ata_host *host)
{
int i;
- for (i = 0; i < host->n_ports; i++) {
- /* Ensure ata_port probe has completed */
- async_synchronize_cookie(host->ports[i]->cookie + 1);
+ for (i = 0; i < host->n_ports; i++)
ata_port_detach(host->ports[i]);
- }
/* the host is dead now, dissociate ACPI */
ata_acpi_dissociate(host);
@@ -6031,10 +6180,24 @@ EXPORT_SYMBOL_GPL(ata_pci_remove_one);
void ata_pci_shutdown_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
+ struct ata_port *ap;
+ unsigned long flags;
int i;
+ /* Tell EH to disable all devices */
for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
+ ap = host->ports[i];
+ spin_lock_irqsave(ap->lock, flags);
+ ap->pflags |= ATA_PFLAG_UNLOADING;
+ ata_port_schedule_eh(ap);
+ spin_unlock_irqrestore(ap->lock, flags);
+ }
+
+ for (i = 0; i < host->n_ports; i++) {
+ ap = host->ports[i];
+
+ /* Wait for EH to complete before freezing the port */
+ ata_port_wait_eh(ap);
ap->pflags |= ATA_PFLAG_FROZEN;