Diffstat (limited to 'drivers/mmc/core/mmc_ops.c')
 drivers/mmc/core/mmc_ops.c | 557
 1 file changed, 355 insertions(+), 202 deletions(-)
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 9054329fe903..a952cc8265af 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* linux/drivers/mmc/core/mmc_ops.h
*
* Copyright 2006-2007 Pierre Ossman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
*/
#include <linux/slab.h>
@@ -23,7 +19,10 @@
#include "host.h"
#include "mmc_ops.h"
-#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
+#define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */
+#define MMC_OP_COND_PERIOD_US (4 * 1000) /* 4ms */
+#define MMC_OP_COND_TIMEOUT_MS 1000 /* 1s */
static const u8 tuning_blk_pattern_4bit[] = {
0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
@@ -55,6 +54,18 @@ static const u8 tuning_blk_pattern_8bit[] = {
0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
+struct mmc_busy_data {
+ struct mmc_card *card;
+ bool retry_crc_err;
+ enum mmc_busy_cmd busy_cmd;
+};
+
+struct mmc_op_cond_busy_data {
+ struct mmc_host *host;
+ u32 ocr;
+ struct mmc_command *cmd;
+};
+
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
int err;
@@ -133,10 +144,24 @@ int mmc_set_dsr(struct mmc_host *host)
return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
+int __mmc_go_idle(struct mmc_host *host)
+{
+ struct mmc_command cmd = {};
+ int err;
+
+ cmd.opcode = MMC_GO_IDLE_STATE;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ mmc_delay(1);
+
+ return err;
+}
+
int mmc_go_idle(struct mmc_host *host)
{
int err;
- struct mmc_command cmd = {};
/*
* Non-SPI hosts need to prevent chipselect going active during
@@ -152,13 +177,7 @@ int mmc_go_idle(struct mmc_host *host)
mmc_delay(1);
}
- cmd.opcode = MMC_GO_IDLE_STATE;
- cmd.arg = 0;
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
-
- err = mmc_wait_for_cmd(host, &cmd, 0);
-
- mmc_delay(1);
+ err = __mmc_go_idle(host);
if (!mmc_host_is_spi(host)) {
mmc_set_chip_select(host, MMC_CS_DONTCARE);
@@ -170,37 +189,64 @@ int mmc_go_idle(struct mmc_host *host)
return err;
}
+static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
+{
+ struct mmc_op_cond_busy_data *data = cb_data;
+ struct mmc_host *host = data->host;
+ struct mmc_command *cmd = data->cmd;
+ u32 ocr = data->ocr;
+ int err = 0;
+
+ err = mmc_wait_for_cmd(host, cmd, 0);
+ if (err)
+ return err;
+
+ if (mmc_host_is_spi(host)) {
+ if (!(cmd->resp[0] & R1_SPI_IDLE)) {
+ *busy = false;
+ return 0;
+ }
+ } else {
+ if (cmd->resp[0] & MMC_CARD_BUSY) {
+ *busy = false;
+ return 0;
+ }
+ }
+
+ *busy = true;
+
+ /*
+ * According to eMMC specification v5.1 section 6.4.3, we
+ * should issue CMD1 repeatedly in the idle state until
+ * the eMMC is ready. Otherwise some eMMC devices seem to enter
+ * the inactive mode after mmc_init_card() issued CMD0 when
+ * the eMMC device is busy.
+ */
+ if (!ocr && !mmc_host_is_spi(host))
+ cmd->arg = cmd->resp[0] | BIT(30);
+
+ return 0;
+}
+
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
struct mmc_command cmd = {};
- int i, err = 0;
+ int err = 0;
+ struct mmc_op_cond_busy_data cb_data = {
+ .host = host,
+ .ocr = ocr,
+ .cmd = &cmd
+ };
cmd.opcode = MMC_SEND_OP_COND;
cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
- for (i = 100; i; i--) {
- err = mmc_wait_for_cmd(host, &cmd, 0);
- if (err)
- break;
-
- /* if we're just probing, do a single pass */
- if (ocr == 0)
- break;
-
- /* otherwise wait until reset completes */
- if (mmc_host_is_spi(host)) {
- if (!(cmd.resp[0] & R1_SPI_IDLE))
- break;
- } else {
- if (cmd.resp[0] & MMC_CARD_BUSY)
- break;
- }
-
- err = -ETIMEDOUT;
-
- mmc_delay(10);
- }
+ err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
+ MMC_OP_COND_TIMEOUT_MS,
+ &__mmc_send_op_cond_cb, &cb_data);
+ if (err)
+ return err;
if (rocr && !mmc_host_is_spi(host))
*rocr = cmd.resp[0];
@@ -242,9 +288,8 @@ mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
* NOTE: void *buf, caller for the buf is required to use DMA-capable
* buffer or on-stack buffer (with some overhead in callee).
*/
-static int
-mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
- u32 opcode, void *buf, unsigned len)
+int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
+ u32 args, void *buf, unsigned len)
{
struct mmc_request mrq = {};
struct mmc_command cmd = {};
@@ -255,7 +300,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
mrq.data = &data;
cmd.opcode = opcode;
- cmd.arg = 0;
+ cmd.arg = args;
/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
* rely on callers to never use this with "native" calls for reading
@@ -292,61 +337,40 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
return 0;
}
-static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
+static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
int ret, i;
- __be32 *csd_tmp;
+ __be32 *cxd_tmp;
- csd_tmp = kzalloc(16, GFP_KERNEL);
- if (!csd_tmp)
+ cxd_tmp = kzalloc(16, GFP_KERNEL);
+ if (!cxd_tmp)
return -ENOMEM;
- ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
+ ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
if (ret)
goto err;
for (i = 0; i < 4; i++)
- csd[i] = be32_to_cpu(csd_tmp[i]);
+ cxd[i] = be32_to_cpu(cxd_tmp[i]);
err:
- kfree(csd_tmp);
+ kfree(cxd_tmp);
return ret;
}
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
if (mmc_host_is_spi(card->host))
- return mmc_spi_send_csd(card, csd);
+ return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);
return mmc_send_cxd_native(card->host, card->rca << 16, csd,
MMC_SEND_CSD);
}
-static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
-{
- int ret, i;
- __be32 *cid_tmp;
-
- cid_tmp = kzalloc(16, GFP_KERNEL);
- if (!cid_tmp)
- return -ENOMEM;
-
- ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
- if (ret)
- goto err;
-
- for (i = 0; i < 4; i++)
- cid[i] = be32_to_cpu(cid_tmp[i]);
-
-err:
- kfree(cid_tmp);
- return ret;
-}
-
int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
if (mmc_host_is_spi(host))
- return mmc_spi_send_cid(host, cid);
+ return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);
return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}
@@ -359,7 +383,7 @@ int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
if (!card || !new_ext_csd)
return -EINVAL;
- if (!mmc_can_ext_csd(card))
+ if (!mmc_card_can_ext_csd(card))
return -EOPNOTSUPP;
/*
@@ -370,7 +394,7 @@ int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
if (!ext_csd)
return -ENOMEM;
- err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
+ err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
512);
if (err)
kfree(ext_csd);
@@ -427,7 +451,7 @@ static int mmc_switch_status_error(struct mmc_host *host, u32 status)
}
/* Caller must hold re-tuning */
-int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
+int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
u32 status;
int err;
@@ -441,35 +465,59 @@ int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
return mmc_switch_status_error(card->host, status);
}
-int mmc_switch_status(struct mmc_card *card)
+static int mmc_busy_cb(void *cb_data, bool *busy)
{
- return __mmc_switch_status(card, true);
+ struct mmc_busy_data *data = cb_data;
+ struct mmc_host *host = data->card->host;
+ u32 status = 0;
+ int err;
+
+ if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
+ *busy = host->ops->card_busy(host);
+ return 0;
+ }
+
+ err = mmc_send_status(data->card, &status);
+ if (data->retry_crc_err && err == -EILSEQ) {
+ *busy = true;
+ return 0;
+ }
+ if (err)
+ return err;
+
+ switch (data->busy_cmd) {
+ case MMC_BUSY_CMD6:
+ err = mmc_switch_status_error(host, status);
+ break;
+ case MMC_BUSY_ERASE:
+ err = R1_STATUS(status) ? -EIO : 0;
+ break;
+ case MMC_BUSY_HPI:
+ case MMC_BUSY_EXTR_SINGLE:
+ case MMC_BUSY_IO:
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (err)
+ return err;
+
+ *busy = !mmc_ready_for_data(status);
+ return 0;
}
-static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
- bool send_status, bool retry_crc_err)
+int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
+ unsigned int timeout_ms,
+ int (*busy_cb)(void *cb_data, bool *busy),
+ void *cb_data)
{
- struct mmc_host *host = card->host;
int err;
unsigned long timeout;
- u32 status = 0;
+ unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
bool expired = false;
bool busy = false;
- /* We have an unspecified cmd timeout, use the fallback value. */
- if (!timeout_ms)
- timeout_ms = MMC_OPS_TIMEOUT_MS;
-
- /*
- * In cases when not allowed to poll by using CMD13 or because we aren't
- * capable of polling by using ->card_busy(), then rely on waiting the
- * stated timeout to be sufficient.
- */
- if (!send_status && !host->ops->card_busy) {
- mmc_delay(timeout_ms);
- return 0;
- }
-
timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
do {
/*
@@ -478,21 +526,9 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
*/
expired = time_after(jiffies, timeout);
- if (host->ops->card_busy) {
- busy = host->ops->card_busy(host);
- } else {
- err = mmc_send_status(card, &status);
- if (retry_crc_err && err == -EILSEQ) {
- busy = true;
- } else if (err) {
- return err;
- } else {
- err = mmc_switch_status_error(host, status);
- if (err)
- return err;
- busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
- }
- }
+ err = (*busy_cb)(cb_data, &busy);
+ if (err)
+ return err;
/* Timeout if the device still remains busy. */
if (expired && busy) {
@@ -500,10 +536,54 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
mmc_hostname(host), __func__);
return -ETIMEDOUT;
}
+
+ /* Throttle the polling rate to avoid hogging the CPU. */
+ if (busy) {
+ usleep_range(udelay, udelay * 2);
+ if (udelay < udelay_max)
+ udelay *= 2;
+ }
} while (busy);
return 0;
}
+EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
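As a caller-side illustration (not part of this patch), here is a minimal sketch of plugging a custom busy callback into __mmc_poll_for_busy(); the my_* names are hypothetical:

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include "mmc_ops.h"	/* private header in drivers/mmc/core */

struct my_busy_ctx {
	struct mmc_card *card;
};

static int my_busy_cb(void *cb_data, bool *busy)
{
	struct my_busy_ctx *ctx = cb_data;
	u32 status;
	int err;

	err = mmc_send_status(ctx->card, &status);
	if (err)
		return err;

	/* Keep polling until the card reports it is ready for data. */
	*busy = !mmc_ready_for_data(status);
	return 0;
}

static int my_wait_ready(struct mmc_card *card)
{
	struct my_busy_ctx ctx = { .card = card };

	/* period_us == 0: start at 32us and back off exponentially. */
	return __mmc_poll_for_busy(card->host, 0, 1000, &my_busy_cb, &ctx);
}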
+
+int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
+{
+ struct mmc_host *host = card->host;
+ struct mmc_busy_data cb_data;
+
+ cb_data.card = card;
+ cb_data.retry_crc_err = retry_crc_err;
+ cb_data.busy_cmd = busy_cmd;
+
+ return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
+}
+EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
+
+bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
+ unsigned int timeout_ms)
+{
+ /*
+ * If the max_busy_timeout of the host is specified, make sure it's
+ * enough to fit the used timeout_ms. In case it's not, let's instruct
+ * the host to avoid HW busy detection, by converting to a R1 response
+ * instead of a R1B. Note that some hosts require R1B, which also means
+ * they are on their own when it comes to dealing with the busy timeout.
+ */
+ if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
+ (timeout_ms > host->max_busy_timeout)) {
+ cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
+ return false;
+ }
+
+ cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
+ cmd->busy_timeout = timeout_ms;
+ return true;
+}
+EXPORT_SYMBOL_GPL(mmc_prepare_busy_cmd);
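A hedged sketch of the intended call pattern (hypothetical my_* caller, not from the patch): prepare the command as R1B when the host can cover timeout_ms in HW, otherwise fall back to SW polling:

static int my_send_busy_cmd(struct mmc_card *card, u32 opcode, u32 arg,
			    unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	struct mmc_command cmd = {};
	bool use_r1b_resp;
	int err;

	cmd.opcode = opcode;
	cmd.arg = arg;
	/* Picks R1B + busy_timeout, or downgrades to R1 when timeout_ms
	 * exceeds host->max_busy_timeout. */
	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	/* HW busy detection already covered the wait. */
	if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		return 0;

	/* Otherwise poll CMD13 until the card leaves the busy state. */
	return mmc_poll_for_busy(card, timeout_ms, false, MMC_BUSY_EXTR_SINGLE);
}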
/**
* __mmc_switch - modify EXT_CSD register
@@ -514,69 +594,58 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
* @timeout_ms: timeout (ms) for operation performed by register write,
* timeout of zero implies maximum possible timeout
* @timing: new timing to change to
- * @use_busy_signal: use the busy signal as response type
* @send_status: send status cmd to poll for busy
* @retry_crc_err: retry when CRC errors when polling with CMD13 for busy
+ * @retries: number of retries
*
* Modifies the EXT_CSD register for selected card.
*/
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms, unsigned char timing,
- bool use_busy_signal, bool send_status, bool retry_crc_err)
+ bool send_status, bool retry_crc_err, unsigned int retries)
{
struct mmc_host *host = card->host;
int err;
struct mmc_command cmd = {};
- bool use_r1b_resp = use_busy_signal;
+ bool use_r1b_resp;
unsigned char old_timing = host->ios.timing;
mmc_retune_hold(host);
- /*
- * If the cmd timeout and the max_busy_timeout of the host are both
- * specified, let's validate them. A failure means we need to prevent
- * the host from doing hw busy detection, which is done by converting
- * to a R1 response instead of a R1B.
- */
- if (timeout_ms && host->max_busy_timeout &&
- (timeout_ms > host->max_busy_timeout))
- use_r1b_resp = false;
+ if (!timeout_ms) {
+ pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
+ mmc_hostname(host));
+ timeout_ms = card->ext_csd.generic_cmd6_time;
+ }
cmd.opcode = MMC_SWITCH;
cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
(index << 16) |
(value << 8) |
set;
- cmd.flags = MMC_CMD_AC;
- if (use_r1b_resp) {
- cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
- /*
- * A busy_timeout of zero means the host can decide to use
- * whatever value it finds suitable.
- */
- cmd.busy_timeout = timeout_ms;
- } else {
- cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
- }
+ use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
- if (index == EXT_CSD_SANITIZE_START)
- cmd.sanitize_busy = true;
-
- err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+ err = mmc_wait_for_cmd(host, &cmd, retries);
if (err)
goto out;
- /* No need to check card status in case of unblocking command */
- if (!use_busy_signal)
- goto out;
-
/* If SPI or used HW busy detection above, then we don't need to poll. */
if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
mmc_host_is_spi(host))
goto out_tim;
+ /*
+ * If the host doesn't support HW polling via the ->card_busy() ops and
+ * it's not allowed to poll by using CMD13, then we need to rely on
+ * waiting the stated timeout to be sufficient.
+ */
+ if (!send_status && !host->ops->card_busy) {
+ mmc_delay(timeout_ms);
+ goto out_tim;
+ }
+
/* Let's try to poll to find out when the command is completed. */
- err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
+ err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
if (err)
goto out;
@@ -586,7 +655,7 @@ out_tim:
mmc_set_timing(host, timing);
if (send_status) {
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, true);
if (err && timing)
mmc_set_timing(host, old_timing);
}
@@ -600,7 +669,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms)
{
return __mmc_switch(card, set, index, value, timeout_ms, 0,
- true, true, false);
+ true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);
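For reference, a typical call site now looks like this sketch (hypothetical helper; the EXT_CSD index and value depend on the feature being switched):

static int my_set_ext_csd_byte(struct mmc_card *card, u8 index, u8 value)
{
	/* With the new fallback above, a zero timeout would also work, but
	 * passing the card's generic CMD6 time avoids the warning. */
	return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, index, value,
			  card->ext_csd.generic_cmd6_time);
}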
@@ -673,7 +742,7 @@ out:
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
-int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
+int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
struct mmc_command cmd = {};
@@ -696,7 +765,7 @@ int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
return mmc_wait_for_cmd(host, &cmd, 0);
}
-EXPORT_SYMBOL_GPL(mmc_abort_tuning);
+EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
@@ -796,32 +865,35 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width)
return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
-static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
+static int mmc_send_hpi_cmd(struct mmc_card *card)
{
+ unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
+ struct mmc_host *host = card->host;
+ bool use_r1b_resp = false;
struct mmc_command cmd = {};
- unsigned int opcode;
int err;
- opcode = card->ext_csd.hpi_cmd;
- if (opcode == MMC_STOP_TRANSMISSION)
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- else if (opcode == MMC_SEND_STATUS)
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
-
- cmd.opcode = opcode;
+ cmd.opcode = card->ext_csd.hpi_cmd;
cmd.arg = card->rca << 16 | 1;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (cmd.opcode == MMC_STOP_TRANSMISSION)
+ use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
+ busy_timeout_ms);
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
if (err) {
- pr_warn("%s: error %d interrupting operation. "
- "HPI command response %#x\n", mmc_hostname(card->host),
- err, cmd.resp[0]);
+ pr_warn("%s: HPI error %d. Command response %#x\n",
+ mmc_hostname(host), err, cmd.resp[0]);
return err;
}
- if (status)
- *status = cmd.resp[0];
- return 0;
+ /* No need to poll when using HW busy detection. */
+ if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
+ return 0;
+
+ /* Let's poll to find out when the HPI request completes. */
+ return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}
/**
@@ -831,11 +903,10 @@ static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
* Issued High Priority Interrupt, and check for card status
* until out-of prg-state.
*/
-int mmc_interrupt_hpi(struct mmc_card *card)
+static int mmc_interrupt_hpi(struct mmc_card *card)
{
int err;
u32 status;
- unsigned long prg_wait;
if (!card->ext_csd.hpi_en) {
pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
@@ -868,25 +939,12 @@ int mmc_interrupt_hpi(struct mmc_card *card)
goto out;
}
- err = mmc_send_hpi_cmd(card, &status);
- if (err)
- goto out;
-
- prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
- do {
- err = mmc_send_status(card, &status);
-
- if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
- break;
- if (time_after(jiffies, prg_wait))
- err = -ETIMEDOUT;
- } while (!err);
-
+ err = mmc_send_hpi_cmd(card);
out:
return err;
}
-int mmc_can_ext_csd(struct mmc_card *card)
+bool mmc_card_can_ext_csd(struct mmc_card *card)
{
return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}
@@ -939,36 +997,22 @@ void mmc_run_bkops(struct mmc_card *card)
* urgent levels by using an asynchronous background task, when idle.
*/
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS);
- if (err)
- pr_warn("%s: Error %d starting bkops\n",
+ EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
+ /*
+ * If the BKOPS timed out, the card is probably still busy in the
+ * R1_STATE_PRG. Rather than continue to wait, let's try to abort
+ * it with a HPI command to get back into R1_STATE_TRAN.
+ */
+ if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
+ pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
+ else if (err)
+ pr_warn("%s: Error %d running bkops\n",
mmc_hostname(card->host), err);
mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);
-/*
- * Flush the cache to the non-volatile storage.
- */
-int mmc_flush_cache(struct mmc_card *card)
-{
- int err = 0;
-
- if (mmc_card_mmc(card) &&
- (card->ext_csd.cache_size > 0) &&
- (card->ext_csd.cache_ctrl & 1)) {
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_FLUSH_CACHE, 1, 0);
- if (err)
- pr_err("%s: cache flush error %d\n",
- mmc_hostname(card->host), err);
- }
-
- return err;
-}
-EXPORT_SYMBOL(mmc_flush_cache);
-
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
@@ -996,3 +1040,112 @@ int mmc_cmdq_disable(struct mmc_card *card)
return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
+
+int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
+{
+ struct mmc_host *host = card->host;
+ int err;
+
+ if (!mmc_card_can_sanitize(card)) {
+ pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
+ return -EOPNOTSUPP;
+ }
+
+ if (!timeout_ms)
+ timeout_ms = MMC_SANITIZE_TIMEOUT_MS;
+
+ pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));
+
+ mmc_retune_hold(host);
+
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
+ 1, timeout_ms, 0, true, false, 0);
+ if (err)
+ pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);
+
+ /*
+ * If the sanitize operation timed out, the card is probably still busy
+ * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
+ * it with a HPI command to get back into R1_STATE_TRAN.
+ */
+ if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
+ pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));
+
+ mmc_retune_release(host);
+
+ pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
+ return err;
+}
+EXPORT_SYMBOL_GPL(mmc_sanitize);
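A minimal caller sketch (hypothetical, e.g. from an ioctl path); passing 0 selects the MMC_SANITIZE_TIMEOUT_MS default:

static int my_do_sanitize(struct mmc_card *card)
{
	/* 0 -> fall back to MMC_SANITIZE_TIMEOUT_MS (240s). */
	return mmc_sanitize(card, 0);
}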
+
+/**
+ * mmc_read_tuning() - read data blocks from the mmc
+ * @host: mmc host doing the read
+ * @blksz: data block size
+ * @blocks: number of blocks to read
+ *
+ * Read one or more blocks of data from the beginning of the mmc. This is a
+ * low-level helper for tuning operations. It is assumed that CMD23 can be
+ * used for a multi-block read if the host supports it.
+ *
+ * Note: A temporary buffer is allocated and freed internally to hold the data
+ * read. The data is not available outside of the function, only the status of
+ * the read operation.
+ *
+ * Return: 0 in case of success, otherwise -EIO / -ENOMEM / -E2BIG
+ */
+int mmc_read_tuning(struct mmc_host *host, unsigned int blksz, unsigned int blocks)
+{
+ struct mmc_request mrq = {};
+ struct mmc_command sbc = {};
+ struct mmc_command cmd = {};
+ struct mmc_command stop = {};
+ struct mmc_data data = {};
+ struct scatterlist sg;
+ void *buf;
+ unsigned int len;
+
+ if (blocks > 1) {
+ if (mmc_host_can_cmd23(host)) {
+ mrq.sbc = &sbc;
+ sbc.opcode = MMC_SET_BLOCK_COUNT;
+ sbc.arg = blocks;
+ sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ }
+ cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
+ mrq.stop = &stop;
+ stop.opcode = MMC_STOP_TRANSMISSION;
+ stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ cmd.opcode = MMC_READ_SINGLE_BLOCK;
+ }
+
+ mrq.cmd = &cmd;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ mrq.data = &data;
+ data.flags = MMC_DATA_READ;
+ data.blksz = blksz;
+ data.blocks = blocks;
+ data.blk_addr = 0;
+ data.sg = &sg;
+ data.sg_len = 1;
+ data.timeout_ns = 1000000000;
+
+ if (check_mul_overflow(blksz, blocks, &len))
+ return -E2BIG;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ sg_init_one(&sg, buf, len);
+
+ mmc_wait_for_req(host, &mrq);
+ kfree(buf);
+
+ if (sbc.error || cmd.error || data.error)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_read_tuning);
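An illustrative tuning-path use (hypothetical helper): read eight 512-byte blocks from block 0 and treat any sbc/cmd/data error as a failed tap:

static bool my_tuning_tap_ok(struct mmc_host *host)
{
	/* Only the status matters; the data itself is discarded. */
	return mmc_read_tuning(host, 512, 8) == 0;
}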