Diffstat (limited to 'drivers/mmc/core/mmc_ops.c')
-rw-r--r--	drivers/mmc/core/mmc_ops.c	429
1 file changed, 257 insertions(+), 172 deletions(-)
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index baa6314f69b4..a952cc8265af 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -20,8 +20,9 @@
 #include "mmc_ops.h"
 
 #define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
-#define MMC_CACHE_FLUSH_TIMEOUT_MS	(30 * 1000) /* 30s */
 #define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
+#define MMC_OP_COND_PERIOD_US		(4 * 1000) /* 4ms */
+#define MMC_OP_COND_TIMEOUT_MS		1000 /* 1s */
 
 static const u8 tuning_blk_pattern_4bit[] = {
 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
@@ -53,6 +54,18 @@ static const u8 tuning_blk_pattern_8bit[] = {
 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
 };
 
+struct mmc_busy_data {
+	struct mmc_card *card;
+	bool retry_crc_err;
+	enum mmc_busy_cmd busy_cmd;
+};
+
+struct mmc_op_cond_busy_data {
+	struct mmc_host *host;
+	u32 ocr;
+	struct mmc_command *cmd;
+};
+
 int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
 {
 	int err;
@@ -131,10 +144,24 @@ int mmc_set_dsr(struct mmc_host *host)
 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 }
 
+int __mmc_go_idle(struct mmc_host *host)
+{
+	struct mmc_command cmd = {};
+	int err;
+
+	cmd.opcode = MMC_GO_IDLE_STATE;
+	cmd.arg = 0;
+	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
+
+	err = mmc_wait_for_cmd(host, &cmd, 0);
+	mmc_delay(1);
+
+	return err;
+}
+
 int mmc_go_idle(struct mmc_host *host)
 {
 	int err;
-	struct mmc_command cmd = {};
 
 	/*
 	 * Non-SPI hosts need to prevent chipselect going active during
@@ -150,13 +177,7 @@ int mmc_go_idle(struct mmc_host *host)
 		mmc_delay(1);
 	}
 
-	cmd.opcode = MMC_GO_IDLE_STATE;
-	cmd.arg = 0;
-	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
-
-	err = mmc_wait_for_cmd(host, &cmd, 0);
-
-	mmc_delay(1);
+	err = __mmc_go_idle(host);
 
 	if (!mmc_host_is_spi(host)) {
 		mmc_set_chip_select(host, MMC_CS_DONTCARE);
@@ -168,43 +189,64 @@ int mmc_go_idle(struct mmc_host *host)
 	return err;
 }
 
+static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
+{
+	struct mmc_op_cond_busy_data *data = cb_data;
+	struct mmc_host *host = data->host;
+	struct mmc_command *cmd = data->cmd;
+	u32 ocr = data->ocr;
+	int err = 0;
+
+	err = mmc_wait_for_cmd(host, cmd, 0);
+	if (err)
+		return err;
+
+	if (mmc_host_is_spi(host)) {
+		if (!(cmd->resp[0] & R1_SPI_IDLE)) {
+			*busy = false;
+			return 0;
+		}
+	} else {
+		if (cmd->resp[0] & MMC_CARD_BUSY) {
+			*busy = false;
+			return 0;
+		}
+	}
+
+	*busy = true;
+
+	/*
+	 * According to eMMC specification v5.1 section 6.4.3, we
+	 * should issue CMD1 repeatedly in the idle state until
+	 * the eMMC is ready. Otherwise some eMMC devices seem to enter
+	 * the inactive mode after mmc_init_card() issued CMD0 when
+	 * the eMMC device is busy.
+	 */
+	if (!ocr && !mmc_host_is_spi(host))
+		cmd->arg = cmd->resp[0] | BIT(30);
+
+	return 0;
+}
+
 int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
 {
 	struct mmc_command cmd = {};
-	int i, err = 0;
+	int err = 0;
+	struct mmc_op_cond_busy_data cb_data = {
+		.host = host,
+		.ocr = ocr,
+		.cmd = &cmd
+	};
 
 	cmd.opcode = MMC_SEND_OP_COND;
 	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
 
-	for (i = 100; i; i--) {
-		err = mmc_wait_for_cmd(host, &cmd, 0);
-		if (err)
-			break;
-
-		/* wait until reset completes */
-		if (mmc_host_is_spi(host)) {
-			if (!(cmd.resp[0] & R1_SPI_IDLE))
-				break;
-		} else {
-			if (cmd.resp[0] & MMC_CARD_BUSY)
-				break;
-		}
-
-		err = -ETIMEDOUT;
-
-		mmc_delay(10);
-
-		/*
-		 * According to eMMC specification v5.1 section 6.4.3, we
-		 * should issue CMD1 repeatedly in the idle state until
-		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
-		 * the inactive mode after mmc_init_card() issued CMD0 when
-		 * the eMMC device is busy.
-		 */
-		if (!ocr && !mmc_host_is_spi(host))
-			cmd.arg = cmd.resp[0] | BIT(30);
-	}
+	err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
+				  MMC_OP_COND_TIMEOUT_MS,
+				  &__mmc_send_op_cond_cb, &cb_data);
+	if (err)
+		return err;
 
 	if (rocr && !mmc_host_is_spi(host))
 		*rocr = cmd.resp[0];
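
The hunk above replaces the open-coded, 100-iteration CMD1 loop (fixed 10ms delay, manual -ETIMEDOUT bookkeeping) with the generic __mmc_poll_for_busy() poller, checking every 4ms for at most 1s. A caller passes its state through an opaque cb_data pointer plus a callback that sets *busy. A minimal sketch of the pattern, modelled on __mmc_send_op_cond_cb() above; my_poll_ctx, my_ready_cb() and my_read_status() are illustrative names, not kernel APIs:

	/* Hypothetical caller of the exported __mmc_poll_for_busy(). */
	struct my_poll_ctx {
		struct mmc_host *host;
		u32 ready_bit;
	};

	static int my_ready_cb(void *cb_data, bool *busy)
	{
		struct my_poll_ctx *ctx = cb_data;
		u32 status;
		int err;

		err = my_read_status(ctx->host, &status); /* hypothetical */
		if (err)
			return err; /* a negative return aborts the poll */

		*busy = !(status & ctx->ready_bit); /* keep polling while busy */
		return 0;
	}

	static int my_wait_ready(struct mmc_host *host)
	{
		struct my_poll_ctx ctx = { .host = host, .ready_bit = BIT(31) };

		/* Poll every 4ms, time out after 1s, like CMD1 above. */
		return __mmc_poll_for_busy(host, 4000, 1000, &my_ready_cb, &ctx);
	}
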
@@ -246,9 +288,8 @@ mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
  * NOTE: void *buf, caller for the buf is required to use DMA-capable
  * buffer or on-stack buffer (with some overhead in callee).
  */
-static int
-mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
-		u32 opcode, void *buf, unsigned len)
+int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
+		       u32 args, void *buf, unsigned len)
 {
 	struct mmc_request mrq = {};
 	struct mmc_command cmd = {};
@@ -259,7 +300,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
 	mrq.data = &data;
 
 	cmd.opcode = opcode;
-	cmd.arg = 0;
+	cmd.arg = args;
 
 	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
 	 * rely on callers to never use this with "native" calls for reading
@@ -296,61 +337,40 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
 	return 0;
 }
 
-static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
+static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
 {
 	int ret, i;
-	__be32 *csd_tmp;
+	__be32 *cxd_tmp;
 
-	csd_tmp = kzalloc(16, GFP_KERNEL);
-	if (!csd_tmp)
+	cxd_tmp = kzalloc(16, GFP_KERNEL);
+	if (!cxd_tmp)
 		return -ENOMEM;
 
-	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
+	ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
 	if (ret)
 		goto err;
 
 	for (i = 0; i < 4; i++)
-		csd[i] = be32_to_cpu(csd_tmp[i]);
+		cxd[i] = be32_to_cpu(cxd_tmp[i]);
 
 err:
-	kfree(csd_tmp);
+	kfree(cxd_tmp);
 	return ret;
 }
 
 int mmc_send_csd(struct mmc_card *card, u32 *csd)
 {
 	if (mmc_host_is_spi(card->host))
-		return mmc_spi_send_csd(card, csd);
+		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);
 
 	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
 				MMC_SEND_CSD);
 }
 
-static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
-{
-	int ret, i;
-	__be32 *cid_tmp;
-
-	cid_tmp = kzalloc(16, GFP_KERNEL);
-	if (!cid_tmp)
-		return -ENOMEM;
-
-	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
-	if (ret)
-		goto err;
-
-	for (i = 0; i < 4; i++)
-		cid[i] = be32_to_cpu(cid_tmp[i]);
-
-err:
-	kfree(cid_tmp);
-	return ret;
-}
-
 int mmc_send_cid(struct mmc_host *host, u32 *cid)
 {
 	if (mmc_host_is_spi(host))
-		return mmc_spi_send_cid(host, cid);
+		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);
 
 	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
 }
@@ -363,7 +383,7 @@ int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
 	if (!card || !new_ext_csd)
 		return -EINVAL;
 
-	if (!mmc_can_ext_csd(card))
+	if (!mmc_card_can_ext_csd(card))
 		return -EOPNOTSUPP;
 
 	/*
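
Here mmc_send_cxd_data() becomes mmc_send_adtc_data(): it loses its static qualifier, gains an args parameter for the command argument, and thereby lets the two near-identical SPI helpers for CSD and CID collapse into one mmc_spi_send_cxd() that differs only in opcode. A sketch of the resulting call pattern for EXT_CSD; my_read_ext_csd is an illustrative wrapper, and the buffer must be DMA-capable per the NOTE above:

	static int my_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
	{
		/* ext_csd must point at a kmalloc'ed, 512-byte buffer. */
		return mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0,
					  ext_csd, 512);
	}
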
@@ -374,7 +394,7 @@ int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
 	if (!ext_csd)
 		return -ENOMEM;
 
-	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
+	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
 				512);
 	if (err)
 		kfree(ext_csd);
@@ -445,34 +465,36 @@ int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
 	return mmc_switch_status_error(card->host, status);
 }
 
-static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
-			   enum mmc_busy_cmd busy_cmd, bool *busy)
+static int mmc_busy_cb(void *cb_data, bool *busy)
 {
-	struct mmc_host *host = card->host;
+	struct mmc_busy_data *data = cb_data;
+	struct mmc_host *host = data->card->host;
 	u32 status = 0;
 	int err;
 
-	if (host->ops->card_busy) {
+	if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
 		*busy = host->ops->card_busy(host);
 		return 0;
 	}
 
-	err = mmc_send_status(card, &status);
-	if (retry_crc_err && err == -EILSEQ) {
+	err = mmc_send_status(data->card, &status);
+	if (data->retry_crc_err && err == -EILSEQ) {
 		*busy = true;
 		return 0;
 	}
 
 	if (err)
 		return err;
 
-	switch (busy_cmd) {
+	switch (data->busy_cmd) {
 	case MMC_BUSY_CMD6:
-		err = mmc_switch_status_error(card->host, status);
+		err = mmc_switch_status_error(host, status);
 		break;
 	case MMC_BUSY_ERASE:
 		err = R1_STATUS(status) ? -EIO : 0;
 		break;
 	case MMC_BUSY_HPI:
+	case MMC_BUSY_EXTR_SINGLE:
+	case MMC_BUSY_IO:
 		break;
 	default:
 		err = -EINVAL;
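
mmc_busy_status() is reshaped into mmc_busy_cb(), a callback matching the generic poller's signature: the card pointer, the retry-on-CRC policy and the busy command now travel in struct mmc_busy_data rather than the argument list, and the new MMC_BUSY_IO case deliberately skips the ->card_busy() fast path. Inside this file (mmc_busy_cb() is static), polling for an erase to finish then amounts to the sketch below, which is exactly what mmc_poll_for_busy() further down packages up:

	struct mmc_busy_data cb_data = {
		.card = card,
		.retry_crc_err = false,
		.busy_cmd = MMC_BUSY_ERASE,
	};
	int err;

	/* period_us == 0 keeps the default 32us..32768us backoff. */
	err = __mmc_poll_for_busy(card->host, 0, timeout_ms,
				  &mmc_busy_cb, &cb_data);
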
@@ -485,27 +507,17 @@ static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
 	return 0;
 }
 
-static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
-			       bool send_status, bool retry_crc_err,
-			       enum mmc_busy_cmd busy_cmd)
+int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
+			unsigned int timeout_ms,
+			int (*busy_cb)(void *cb_data, bool *busy),
+			void *cb_data)
 {
-	struct mmc_host *host = card->host;
 	int err;
 	unsigned long timeout;
-	unsigned int udelay = 32, udelay_max = 32768;
+	unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
 	bool expired = false;
 	bool busy = false;
 
-	/*
-	 * In cases when not allowed to poll by using CMD13 or because we aren't
-	 * capable of polling by using ->card_busy(), then rely on waiting the
-	 * stated timeout to be sufficient.
-	 */
-	if (!send_status && !host->ops->card_busy) {
-		mmc_delay(timeout_ms);
-		return 0;
-	}
-
 	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
 	do {
 		/*
@@ -514,7 +526,7 @@ static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
 		 */
 		expired = time_after(jiffies, timeout);
 
-		err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy);
+		err = (*busy_cb)(cb_data, &busy);
 		if (err)
 			return err;
 
@@ -535,12 +547,43 @@ static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
 
 int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
-		      enum mmc_busy_cmd busy_cmd)
+		      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
+{
+	struct mmc_host *host = card->host;
+	struct mmc_busy_data cb_data;
+
+	cb_data.card = card;
+	cb_data.retry_crc_err = retry_crc_err;
+	cb_data.busy_cmd = busy_cmd;
+
+	return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
+}
+EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
+
+bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
+			  unsigned int timeout_ms)
 {
-	return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd);
+	/*
+	 * If the max_busy_timeout of the host is specified, make sure it's
+	 * enough to fit the used timeout_ms. In case it's not, let's instruct
+	 * the host to avoid HW busy detection, by converting to a R1 response
+	 * instead of a R1B. Note, some hosts requires R1B, which also means
+	 * they are on their own when it comes to deal with the busy timeout.
+	 */
+	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
+	    (timeout_ms > host->max_busy_timeout)) {
+		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
+		return false;
+	}
+
+	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
+	cmd->busy_timeout = timeout_ms;
+	return true;
 }
+EXPORT_SYMBOL_GPL(mmc_prepare_busy_cmd);
 
 /**
  * __mmc_switch - modify EXT_CSD register
@@ -553,17 +596,18 @@ int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
  * @timing: new timing to change to
  * @send_status: send status cmd to poll for busy
  * @retry_crc_err: retry when CRC errors when polling with CMD13 for busy
+ * @retries: number of retries
  *
 * Modifies the EXT_CSD register for selected card.
 */
 int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 		unsigned int timeout_ms, unsigned char timing,
-		bool send_status, bool retry_crc_err)
+		bool send_status, bool retry_crc_err, unsigned int retries)
 {
 	struct mmc_host *host = card->host;
 	int err;
 	struct mmc_command cmd = {};
-	bool use_r1b_resp = true;
+	bool use_r1b_resp;
 	unsigned char old_timing = host->ios.timing;
 
 	mmc_retune_hold(host);
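
The R1B-versus-R1 decision is hoisted out of __mmc_switch() into the exported mmc_prepare_busy_cmd(): if the host's max_busy_timeout cannot cover timeout_ms (and MMC_CAP_NEED_RSP_BUSY doesn't force R1B), the command degrades to a plain R1 and the caller is expected to poll for completion itself. A condensed sketch of how a CMD6 caller uses it after this change; index, value, set and timeout_ms stand in for real arguments:

	struct mmc_command cmd = {
		.opcode = MMC_SWITCH,
		.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | (index << 16) |
		       (value << 8) | set,
	};
	int err;

	/*
	 * Sets cmd.flags to R1B (plus cmd.busy_timeout) when HW busy
	 * detection fits, else to R1; the bool return reports which.
	 */
	mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (!err)
		/* Busy may extend past the response; poll via CMD13. */
		err = mmc_poll_for_busy(card, timeout_ms, false, MMC_BUSY_CMD6);
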
@@ -574,31 +618,14 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 		timeout_ms = card->ext_csd.generic_cmd6_time;
 	}
 
-	/*
-	 * If the max_busy_timeout of the host is specified, make sure it's
-	 * enough to fit the used timeout_ms. In case it's not, let's instruct
-	 * the host to avoid HW busy detection, by converting to a R1 response
-	 * instead of a R1B. Note, some hosts requires R1B, which also means
-	 * they are on their own when it comes to deal with the busy timeout.
-	 */
-	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
-	    (timeout_ms > host->max_busy_timeout))
-		use_r1b_resp = false;
-
 	cmd.opcode = MMC_SWITCH;
 	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
 		  (index << 16) |
 		  (value << 8) |
 		  set;
-	cmd.flags = MMC_CMD_AC;
-	if (use_r1b_resp) {
-		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
-		cmd.busy_timeout = timeout_ms;
-	} else {
-		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
-	}
+	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
 
-	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+	err = mmc_wait_for_cmd(host, &cmd, retries);
 	if (err)
 		goto out;
 
@@ -607,9 +634,18 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 	    mmc_host_is_spi(host))
 		goto out_tim;
 
+	/*
+	 * If the host doesn't support HW polling via the ->card_busy() ops and
+	 * when it's not allowed to poll by using CMD13, then we need to rely on
+	 * waiting the stated timeout to be sufficient.
+	 */
+	if (!send_status && !host->ops->card_busy) {
+		mmc_delay(timeout_ms);
+		goto out_tim;
+	}
+
 	/* Let's try to poll to find out when the command is completed. */
-	err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err,
-				  MMC_BUSY_CMD6);
+	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
 	if (err)
 		goto out;
 
@@ -633,7 +669,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 		unsigned int timeout_ms)
 {
 	return __mmc_switch(card, set, index, value, timeout_ms, 0,
-			true, false);
+			true, false, MMC_CMD_RETRIES);
 }
 EXPORT_SYMBOL_GPL(mmc_switch);
 
@@ -706,7 +742,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(mmc_send_tuning);
 
-int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
+int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
 {
 	struct mmc_command cmd = {};
 
@@ -729,7 +765,7 @@ int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
 
 	return mmc_wait_for_cmd(host, &cmd, 0);
 }
-EXPORT_SYMBOL_GPL(mmc_abort_tuning);
+EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);
 
 static int
 mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
@@ -833,28 +869,17 @@ static int mmc_send_hpi_cmd(struct mmc_card *card)
 {
 	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
 	struct mmc_host *host = card->host;
-	bool use_r1b_resp = true;
+	bool use_r1b_resp = false;
 	struct mmc_command cmd = {};
 	int err;
 
 	cmd.opcode = card->ext_csd.hpi_cmd;
 	cmd.arg = card->rca << 16 | 1;
+	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 
-	/*
-	 * Make sure the host's max_busy_timeout fit the needed timeout for HPI.
-	 * In case it doesn't, let's instruct the host to avoid HW busy
-	 * detection, by using a R1 response instead of R1B.
-	 */
-	if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout)
-		use_r1b_resp = false;
-
-	if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) {
-		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
-		cmd.busy_timeout = busy_timeout_ms;
-	} else {
-		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
-		use_r1b_resp = false;
-	}
+	if (cmd.opcode == MMC_STOP_TRANSMISSION)
+		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
+						    busy_timeout_ms);
 
 	err = mmc_wait_for_cmd(host, &cmd, 0);
 	if (err) {
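
Two consumers pick up the new helpers here: __mmc_switch() now falls back to a plain mmc_delay(timeout_ms) only when it can neither poll with CMD13 nor use ->card_busy(), and mmc_send_hpi_cmd() reuses mmc_prepare_busy_cmd() for its STOP_TRANSMISSION variant. The tuning-abort helper is also renamed to fit the mmc_send_* convention. A sketch of its use from a host driver's tuning path; my_execute_tuning is an illustrative name:

	static int my_execute_tuning(struct mmc_host *host, u32 opcode)
	{
		int err;

		err = mmc_send_tuning(host, opcode, NULL);
		if (err)
			/* CMD12 under the hood; gets the card out of tuning. */
			mmc_send_abort_tuning(host, opcode);

		return err;
	}
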
@@ -868,7 +893,7 @@ static int mmc_send_hpi_cmd(struct mmc_card *card)
 		return 0;
 
 	/* Let's poll to find out when the HPI request completes. */
-	return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI);
+	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
 }
 
 /**
@@ -919,7 +944,7 @@ out:
 	return err;
 }
 
-int mmc_can_ext_csd(struct mmc_card *card)
+bool mmc_card_can_ext_csd(struct mmc_card *card)
 {
 	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
 }
@@ -973,36 +998,21 @@ void mmc_run_bkops(struct mmc_card *card)
 	 */
 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
-	if (err)
-		pr_warn("%s: Error %d starting bkops\n",
+	/*
+	 * If the BKOPS timed out, the card is probably still busy in the
+	 * R1_STATE_PRG. Rather than continue to wait, let's try to abort
+	 * it with a HPI command to get back into R1_STATE_TRAN.
+	 */
+	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
+		pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
+	else if (err)
+		pr_warn("%s: Error %d running bkops\n",
 			mmc_hostname(card->host), err);
 
 	mmc_retune_release(card->host);
 }
 EXPORT_SYMBOL(mmc_run_bkops);
 
-/*
- * Flush the cache to the non-volatile storage.
- */
-int mmc_flush_cache(struct mmc_card *card)
-{
-	int err = 0;
-
-	if (mmc_card_mmc(card) &&
-			(card->ext_csd.cache_size > 0) &&
-			(card->ext_csd.cache_ctrl & 1)) {
-		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				 EXT_CSD_FLUSH_CACHE, 1,
-				 MMC_CACHE_FLUSH_TIMEOUT_MS);
-		if (err)
-			pr_err("%s: cache flush error %d\n",
-			       mmc_hostname(card->host), err);
-	}
-
-	return err;
-}
-EXPORT_SYMBOL(mmc_flush_cache);
-
 static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
 {
 	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
@@ -1031,22 +1041,25 @@ int mmc_cmdq_disable(struct mmc_card *card)
 }
 EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
 
-int mmc_sanitize(struct mmc_card *card)
+int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
 {
 	struct mmc_host *host = card->host;
 	int err;
 
-	if (!mmc_can_sanitize(card)) {
+	if (!mmc_card_can_sanitize(card)) {
 		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
 		return -EOPNOTSUPP;
 	}
 
+	if (!timeout_ms)
+		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;
+
 	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));
 
 	mmc_retune_hold(host);
 
-	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
-			 1, MMC_SANITIZE_TIMEOUT_MS);
+	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
+			   1, timeout_ms, 0, true, false, 0);
 	if (err)
 		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);
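
Three behavioural changes land in this stretch: a timed-out BKOPS start is now aborted with HPI to drag the card from R1_STATE_PRG back to R1_STATE_TRAN; mmc_flush_cache() disappears from this file (the diffstat is limited to mmc_ops.c, so its replacement is not visible here); and mmc_sanitize() takes a caller-supplied timeout, passing retries == 0 since reissuing CMD6 mid-sanitize would be pointless. Callers might use the new parameter as below; the values shown are illustrative:

	/* 0 keeps the historical 240s default (MMC_SANITIZE_TIMEOUT_MS)... */
	err = mmc_sanitize(card, 0);

	/* ...or the caller imposes its own budget, e.g. 60s. */
	err = mmc_sanitize(card, 60 * 1000);
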
@@ -1064,3 +1077,75 @@ int mmc_sanitize(struct mmc_card *card)
 	return err;
 }
 EXPORT_SYMBOL_GPL(mmc_sanitize);
+
+/**
+ * mmc_read_tuning() - read data blocks from the mmc
+ * @host: mmc host doing the read
+ * @blksz: data block size
+ * @blocks: number of blocks to read
+ *
+ * Read one or more blocks of data from the beginning of the mmc. This is a
+ * low-level helper for tuning operation. It is assumed that CMD23 can be used
+ * for multi-block read if the host supports it.
+ *
+ * Note: Allocate and free a temporary buffer to store the data read. The data
+ * is not available outside of the function, only the status of the read
+ * operation.
+ *
+ * Return: 0 in case of success, otherwise -EIO / -ENOMEM / -E2BIG
+ */
+int mmc_read_tuning(struct mmc_host *host, unsigned int blksz, unsigned int blocks)
+{
+	struct mmc_request mrq = {};
+	struct mmc_command sbc = {};
+	struct mmc_command cmd = {};
+	struct mmc_command stop = {};
+	struct mmc_data data = {};
+	struct scatterlist sg;
+	void *buf;
+	unsigned int len;
+
+	if (blocks > 1) {
+		if (mmc_host_can_cmd23(host)) {
+			mrq.sbc = &sbc;
+			sbc.opcode = MMC_SET_BLOCK_COUNT;
+			sbc.arg = blocks;
+			sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+		}
+		cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
+		mrq.stop = &stop;
+		stop.opcode = MMC_STOP_TRANSMISSION;
+		stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+	} else {
+		cmd.opcode = MMC_READ_SINGLE_BLOCK;
+	}
+
+	mrq.cmd = &cmd;
+	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+	mrq.data = &data;
+	data.flags = MMC_DATA_READ;
+	data.blksz = blksz;
+	data.blocks = blocks;
+	data.blk_addr = 0;
+	data.sg = &sg;
+	data.sg_len = 1;
+	data.timeout_ns = 1000000000;
+
+	if (check_mul_overflow(blksz, blocks, &len))
+		return -E2BIG;
+	buf = kmalloc(len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	sg_init_one(&sg, buf, len);
+
+	mmc_wait_for_req(host, &mrq);
+	kfree(buf);
+
+	if (sbc.error || cmd.error || data.error)
+		return -EIO;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_read_tuning);
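
mmc_read_tuning() gives host drivers a generic read-probe for tuning: it reads blocks from address 0 into a throwaway buffer and reports only success or failure. A sketch of a delay-line sweep built on it; my_set_tap() and MY_MAX_TAPS are hypothetical driver internals:

	static int my_execute_tuning_sweep(struct mmc_host *host)
	{
		unsigned int tap;
		int err;

		for (tap = 0; tap < MY_MAX_TAPS; tap++) {
			my_set_tap(host, tap); /* hypothetical delay knob */

			/* One 512-byte block is enough to validate the tap. */
			err = mmc_read_tuning(host, 512, 1);
			if (!err)
				return 0; /* first working tap wins */
		}

		return -EIO;
	}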
