Diffstat (limited to 'drivers/mmc/core/block.c')
-rw-r--r--  drivers/mmc/core/block.c | 547
1 file changed, 426 insertions(+), 121 deletions(-)
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 00c33edb9fb9..fb6eb2d79b4f 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -33,6 +33,7 @@
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
+#include <linux/string.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
@@ -40,6 +41,7 @@
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <linux/debugfs.h>
+#include <linux/rpmb.h>
#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
@@ -48,6 +50,7 @@
#include <linux/mmc/sd.h>
#include <linux/uaccess.h>
+#include <linux/unaligned.h>
#include "queue.h"
#include "block.h"
@@ -76,6 +79,10 @@ MODULE_ALIAS("mmc:block");
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
+#define RPMB_FRAME_SIZE sizeof(struct rpmb_frame)
+#define CHECK_SIZE_NEQ(val) ((val) != sizeof(struct rpmb_frame))
+#define CHECK_SIZE_ALIGNED(val) IS_ALIGNED((val), sizeof(struct rpmb_frame))
+
static DEFINE_MUTEX(block_mutex);
/*
@@ -144,7 +151,7 @@ struct mmc_blk_data {
static dev_t mmc_rpmb_devt;
/* Bus type for RPMB character devices */
-static struct bus_type mmc_rpmb_bus_type = {
+static const struct bus_type mmc_rpmb_bus_type = {
.name = "mmc_rpmb",
};
@@ -155,6 +162,7 @@ static struct bus_type mmc_rpmb_bus_type = {
* @id: unique device ID number
* @part_index: partition index (0 on first)
* @md: parent MMC block device
+ * @rdev: registered RPMB device
* @node: list item, so we can put this device on a list
*/
struct mmc_rpmb_data {
@@ -163,6 +171,7 @@ struct mmc_rpmb_data {
int id;
unsigned int part_index;
struct mmc_blk_data *md;
+ struct rpmb_dev *rdev;
struct list_head node;
};
@@ -178,6 +187,8 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
int recovery_mode,
struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
+static int mmc_spi_err_check(struct mmc_card *card);
+static int mmc_blk_busy_cb(void *cb_data, bool *busy);
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
@@ -204,7 +215,7 @@ static void mmc_blk_kref_release(struct kref *ref)
int devidx;
devidx = mmc_get_devidx(md->disk);
- ida_simple_remove(&mmc_blk_ida, devidx);
+ ida_free(&mmc_blk_ida, devidx);
mutex_lock(&open_lock);
md->disk->private_data = NULL;
@@ -232,7 +243,7 @@ static ssize_t power_ro_lock_show(struct device *dev,
else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
locked = 1;
- ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
+ ret = sysfs_emit(buf, "%d\n", locked);
mmc_blk_put(md);
@@ -264,6 +275,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
goto out_put;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
blk_execute_rq(req, false);
ret = req_to_mmc_queue_req(req)->drv_op_result;
blk_mq_free_request(req);
@@ -293,9 +305,9 @@ static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
int ret;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
- ret = snprintf(buf, PAGE_SIZE, "%d\n",
- get_disk_ro(dev_to_disk(dev)) ^
- md->read_only);
+ ret = sysfs_emit(buf, "%d\n",
+ get_disk_ro(dev_to_disk(dev)) ^
+ md->read_only);
mmc_blk_put(md);
return ret;
}
@@ -304,10 +316,10 @@ static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
- char *end;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
- unsigned long set = simple_strtoul(buf, &end, 0);
- if (end == buf) {
+ unsigned long set;
+
+ if (kstrtoul(buf, 0, &set)) {
ret = -EINVAL;
goto out;
}
@@ -337,10 +349,10 @@ static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj,
if (a == &dev_attr_ro_lock_until_next_power_on.attr &&
(md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
md->queue.card->ext_csd.boot_ro_lockable) {
- mode = S_IRUGO;
+ mode = 0444;
if (!(md->queue.card->ext_csd.boot_ro_lock &
EXT_CSD_BOOT_WP_B_PWR_WP_DIS))
- mode |= S_IWUSR;
+ mode |= 0200;
}
mmc_blk_put(md);
@@ -357,15 +369,15 @@ static const struct attribute_group *mmc_disk_attr_groups[] = {
NULL,
};
-static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
+static int mmc_blk_open(struct gendisk *disk, blk_mode_t mode)
{
- struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
+ struct mmc_blk_data *md = mmc_blk_get(disk);
int ret = -ENXIO;
mutex_lock(&block_mutex);
if (md) {
ret = 0;
- if ((mode & FMODE_WRITE) && md->read_only) {
+ if ((mode & BLK_OPEN_WRITE) && md->read_only) {
mmc_blk_put(md);
ret = -EROFS;
}
@@ -375,7 +387,7 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
return ret;
}
-static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
+static void mmc_blk_release(struct gendisk *disk)
{
struct mmc_blk_data *md = disk->private_data;
@@ -385,9 +397,9 @@ static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
}
static int
-mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+mmc_blk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+ geo->cylinders = get_capacity(disk) / (4 * 16);
geo->heads = 4;
geo->sectors = 16;
return 0;
@@ -397,6 +409,10 @@ struct mmc_blk_ioc_data {
struct mmc_ioc_cmd ic;
unsigned char *buf;
u64 buf_bytes;
+ unsigned int flags;
+#define MMC_BLK_IOC_DROP BIT(0) /* drop this mrq */
+#define MMC_BLK_IOC_SBC BIT(1) /* use mrq.sbc */
+
struct mmc_rpmb_data *rpmb;
};
@@ -406,7 +422,7 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
struct mmc_blk_ioc_data *idata;
int err;
- idata = kmalloc(sizeof(*idata), GFP_KERNEL);
+ idata = kzalloc(sizeof(*idata), GFP_KERNEL);
if (!idata) {
err = -ENOMEM;
goto out;
@@ -462,20 +478,28 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
}
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
- struct mmc_blk_ioc_data *idata)
+ struct mmc_blk_ioc_data **idatas, int i)
{
struct mmc_command cmd = {}, sbc = {};
struct mmc_data data = {};
struct mmc_request mrq = {};
struct scatterlist sg;
- bool r1b_resp, use_r1b_resp = false;
+ bool r1b_resp;
unsigned int busy_timeout_ms;
int err;
unsigned int target_part;
+ struct mmc_blk_ioc_data *idata = idatas[i];
+ struct mmc_blk_ioc_data *prev_idata = NULL;
if (!card || !md || !idata)
return -EINVAL;
+ if (idata->flags & MMC_BLK_IOC_DROP)
+ return 0;
+
+ if (idata->flags & MMC_BLK_IOC_SBC && i > 0)
+ prev_idata = idatas[i - 1];
+
/*
* The RPMB accesses come in from the character device, so we
* need to target these explicitly. Else we just target the
@@ -529,7 +553,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
return err;
}
- if (idata->rpmb) {
+ if (idata->rpmb || prev_idata) {
sbc.opcode = MMC_SET_BLOCK_COUNT;
/*
* We don't do any blockcount validation because the max size
@@ -537,6 +561,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
* 'Reliable Write' bit here.
*/
sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
+ if (prev_idata)
+ sbc.arg = prev_idata->ic.arg;
sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
mrq.sbc = &sbc;
}
@@ -549,12 +575,20 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS;
r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B;
if (r1b_resp)
- use_r1b_resp = mmc_prepare_busy_cmd(card->host, &cmd,
- busy_timeout_ms);
+ mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout_ms);
mmc_wait_for_req(card->host, &mrq);
memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
+ if (prev_idata) {
+ memcpy(&prev_idata->ic.response, sbc.resp, sizeof(sbc.resp));
+ if (sbc.error) {
+ dev_err(mmc_dev(card->host), "%s: sbc error %d\n",
+ __func__, sbc.error);
+ return sbc.error;
+ }
+ }
+
if (cmd.error) {
dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
__func__, cmd.error);
@@ -603,14 +637,28 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
if (idata->ic.postsleep_min_us)
usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
- /* No need to poll when using HW busy detection. */
- if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
- return 0;
+ if (mmc_host_is_spi(card->host)) {
+ if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY)
+ return mmc_spi_err_check(card);
+ return err;
+ }
- /* Ensure RPMB/R1B command has completed by polling with CMD13. */
- if (idata->rpmb || r1b_resp)
- err = mmc_poll_for_busy(card, busy_timeout_ms, false,
- MMC_BUSY_IO);
+ /*
+ * Ensure RPMB, writes and R1B responses are completed by polling with
+ * CMD13. Note that, usually we don't need to poll when using HW busy
+ * detection, but here it's needed since some commands may indicate the
+ * error through the R1 status bits.
+ */
+ if (idata->rpmb || idata->ic.write_flag || r1b_resp) {
+ struct mmc_blk_busy_data cb_data = {
+ .card = card,
+ };
+
+ err = __mmc_poll_for_busy(card->host, 0, busy_timeout_ms,
+ &mmc_blk_busy_cb, &cb_data);
+
+ idata->ic.response[0] = cb_data.status;
+ }
return err;
}
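For reference, __mmc_poll_for_busy() takes a callback of the shape forward-declared earlier (mmc_blk_busy_cb). The sketch below illustrates such a CMD13 polling callback; it is not the driver's actual mmc_blk_busy_cb(), and it assumes mmc_send_status() and mmc_ready_for_data() from the mmc core headers.

struct example_busy_data {
	struct mmc_card *card;
	u32 status;
};

static int example_busy_cb(void *cb_data, bool *busy)
{
	struct example_busy_data *data = cb_data;
	u32 status = 0;
	int err;

	/* CMD13: fetch the card's R1 status register. */
	err = mmc_send_status(data->card, &status);
	if (err)
		return err;

	data->status = status;
	/* Keep polling until the card reports the TRAN state again. */
	*busy = !mmc_ready_for_data(status);
	return 0;
}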
@@ -651,6 +699,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
idatas[0] = idata;
req_to_mmc_queue_req(req)->drv_op =
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = idatas;
req_to_mmc_queue_req(req)->ioc_count = 1;
blk_execute_rq(req, false);
@@ -722,6 +771,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
}
req_to_mmc_queue_req(req)->drv_op =
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = idata;
req_to_mmc_queue_req(req)->ioc_count = n;
blk_execute_rq(req, false);
@@ -754,7 +804,7 @@ static int mmc_blk_check_blkdev(struct block_device *bdev)
return 0;
}
-static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
+static int mmc_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct mmc_blk_data *md;
@@ -791,7 +841,7 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
}
#ifdef CONFIG_COMPAT
-static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
+static int mmc_blk_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
@@ -833,9 +883,11 @@ static const struct block_device_operations mmc_bdops = {
static int mmc_blk_part_switch_pre(struct mmc_card *card,
unsigned int part_type)
{
+ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
+ const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
- if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
+ if ((part_type & mask) == rpmb) {
if (card->ext_csd.cmdq_en) {
ret = mmc_cmdq_disable(card);
if (ret)
@@ -850,9 +902,11 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card,
static int mmc_blk_part_switch_post(struct mmc_card *card,
unsigned int part_type)
{
+ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
+ const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
- if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
+ if ((part_type & mask) == rpmb) {
mmc_retune_unpause(card->host);
if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
ret = mmc_cmdq_enable(card);
@@ -902,30 +956,22 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
int err;
u32 result;
__be32 *blocks;
+ u8 resp_sz = mmc_card_ult_capacity(card) ? 8 : 4;
struct mmc_request mrq = {};
struct mmc_command cmd = {};
struct mmc_data data = {};
-
struct scatterlist sg;
- cmd.opcode = MMC_APP_CMD;
- cmd.arg = card->rca << 16;
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
-
- err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ err = mmc_app_cmd(card->host, card);
if (err)
return err;
- if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
- return -EIO;
-
- memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
- data.blksz = 4;
+ data.blksz = resp_sz;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;
@@ -935,15 +981,27 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
mrq.cmd = &cmd;
mrq.data = &data;
- blocks = kmalloc(4, GFP_KERNEL);
+ blocks = kmalloc(resp_sz, GFP_NOIO);
if (!blocks)
return -ENOMEM;
- sg_init_one(&sg, blocks, 4);
+ sg_init_one(&sg, blocks, resp_sz);
mmc_wait_for_req(card->host, &mrq);
- result = ntohl(*blocks);
+ if (mmc_card_ult_capacity(card)) {
+ /*
+ * Normally, ACMD22 returns the number of written sectors as
+ * u32. SDUC, however, returns it as u64. This is not a
+ * superfluous requirement, because SDUC writes may exceed 2TB.
+ * For Linux mmc, however, the previous write operation could not
+ * have exceeded the block layer limits, thus just make room
+ * for a u64 and cast the response back to u32.
+ */
+ result = clamp_val(get_unaligned_be64(blocks), 0, UINT_MAX);
+ } else {
+ result = ntohl(*blocks);
+ }
kfree(blocks);
if (cmd.error || data.error)
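A minimal sketch of the SDUC handling above, assuming only get_unaligned_be64() and clamp_val(): the 8-byte big-endian ACMD22 payload is read as a u64 and clamped so it still fits the u32 result used by the rest of the driver.

static u32 example_acmd22_blocks(const u8 payload[8])
{
	u64 blocks = get_unaligned_be64(payload);

	/* e.g. a reported 0x1_0000_0001 blocks is clamped to 0xffffffff */
	return clamp_val(blocks, 0, UINT_MAX);
}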
@@ -1022,6 +1080,20 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
md->reset_done &= ~type;
}
+static void mmc_blk_check_sbc(struct mmc_queue_req *mq_rq)
+{
+ struct mmc_blk_ioc_data **idata = mq_rq->drv_op_data;
+ int i;
+
+ for (i = 1; i < mq_rq->ioc_count; i++) {
+ if (idata[i - 1]->ic.opcode == MMC_SET_BLOCK_COUNT &&
+ mmc_op_multi(idata[i]->ic.opcode)) {
+ idata[i - 1]->flags |= MMC_BLK_IOC_DROP;
+ idata[i]->flags |= MMC_BLK_IOC_SBC;
+ }
+ }
+}
+
/*
* The non-block commands come back from the block layer after it queued it and
* processed it with all other requests and then they get issued in this
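The MMC_BLK_IOC_DROP/MMC_BLK_IOC_SBC pairing above targets user space that issues CMD23 followed by an open-ended multi-block command through MMC_IOC_MULTI_CMD. Below is a hypothetical user-space sketch of that pattern (not part of the patch); the EXAMPLE_* opcode and flag constants mirror kernel values that are not exported through the uapi header, and error handling is reduced to the bare minimum.

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>

#define EXAMPLE_SET_BLOCK_COUNT		23	/* CMD23 */
#define EXAMPLE_WRITE_MULTIPLE_BLOCK	25	/* CMD25 */

/* Mirrors of MMC_RSP_R1/MMC_CMD_* from the kernel's <linux/mmc/core.h> */
#define EXAMPLE_RSP_PRESENT	(1 << 0)
#define EXAMPLE_RSP_CRC		(1 << 2)
#define EXAMPLE_RSP_OPCODE	(1 << 4)
#define EXAMPLE_RSP_R1		(EXAMPLE_RSP_PRESENT | EXAMPLE_RSP_CRC | EXAMPLE_RSP_OPCODE)
#define EXAMPLE_CMD_AC		(0 << 5)
#define EXAMPLE_CMD_ADTC	(1 << 5)

static int example_cmd23_write(int fd, __u8 *buf, __u32 addr, unsigned int blocks)
{
	struct mmc_ioc_multi_cmd *multi;
	int ret;

	multi = calloc(1, sizeof(*multi) + 2 * sizeof(struct mmc_ioc_cmd));
	if (!multi)
		return -1;

	multi->num_of_cmds = 2;

	/* CMD23: the command mmc_blk_check_sbc() folds into mrq.sbc */
	multi->cmds[0].opcode = EXAMPLE_SET_BLOCK_COUNT;
	multi->cmds[0].arg = blocks;
	multi->cmds[0].flags = EXAMPLE_RSP_R1 | EXAMPLE_CMD_AC;

	/* CMD25: the data transfer the block count applies to */
	multi->cmds[1].opcode = EXAMPLE_WRITE_MULTIPLE_BLOCK;
	multi->cmds[1].write_flag = 1;
	multi->cmds[1].arg = addr;
	multi->cmds[1].blksz = 512;
	multi->cmds[1].blocks = blocks;
	multi->cmds[1].flags = EXAMPLE_RSP_R1 | EXAMPLE_CMD_ADTC;
	mmc_ioc_cmd_set_data(multi->cmds[1], buf);

	ret = ioctl(fd, MMC_IOC_MULTI_CMD, multi);
	free(multi);
	return ret;
}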
@@ -1049,11 +1121,14 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
if (ret)
break;
}
+
+ mmc_blk_check_sbc(mq_rq);
+
fallthrough;
case MMC_DRV_OP_IOCTL_RPMB:
idata = mq_rq->drv_op_data;
for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
- ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
+ ret = __mmc_blk_ioctl_cmd(card, md, idata, i);
if (ret)
break;
}
@@ -1099,11 +1174,12 @@ static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req,
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
- unsigned int from, nr;
+ unsigned int nr;
+ sector_t from;
int err = 0;
blk_status_t status = BLK_STS_OK;
- if (!mmc_can_erase(card)) {
+ if (!mmc_card_can_erase(card)) {
status = BLK_STS_NOTSUPP;
goto fail;
}
@@ -1154,11 +1230,12 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
- unsigned int from, nr, arg;
+ unsigned int nr, arg;
+ sector_t from;
int err = 0, type = MMC_BLK_SECDISCARD;
blk_status_t status = BLK_STS_OK;
- if (!(mmc_can_secure_erase_trim(card))) {
+ if (!(mmc_card_can_secure_erase_trim(card))) {
status = BLK_STS_NOTSUPP;
goto out;
}
@@ -1166,7 +1243,7 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
- if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+ if (mmc_card_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
arg = MMC_SECURE_TRIM1_ARG;
else
arg = MMC_SECURE_ERASE_ARG;
@@ -1472,6 +1549,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
blk_mq_requeue_request(req, true);
else
__blk_mq_end_request(req, BLK_STS_OK);
+ } else if (mq->in_recovery) {
+ blk_mq_requeue_request(req, true);
} else {
blk_mq_end_request(req, BLK_STS_OK);
}
@@ -1648,8 +1727,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
* these, while retaining features like reliable writes.
*/
if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
- (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
- do_data_tag)) {
+ (do_rel_wr || !mmc_card_blk_no_cmd23(card) || do_data_tag)) {
brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
brq->sbc.arg = brq->data.blocks |
(do_rel_wr ? (1 << 31) : 0) |
@@ -1657,6 +1735,11 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
brq->mrq.sbc = &brq->sbc;
}
+
+ if (mmc_card_ult_capacity(card)) {
+ brq->cmd.ext_addr = blk_rq_pos(req) >> 32;
+ brq->cmd.has_ext_addr = true;
+ }
}
#define MMC_MAX_RETRIES 5
@@ -2088,14 +2171,14 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
mmc_blk_urgent_bkops(mq, mqrq);
}
-static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
{
unsigned long flags;
bool put_card;
spin_lock_irqsave(&mq->lock, flags);
- mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+ mq->in_flight[issue_type] -= 1;
put_card = (mmc_tot_in_flight(mq) == 0);
@@ -2108,6 +2191,7 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
bool can_sleep)
{
+ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_request *mrq = &mqrq->brq.mrq;
struct mmc_host *host = mq->card->host;
@@ -2127,7 +2211,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
blk_mq_complete_request(req);
}
- mmc_blk_mq_dec_in_flight(mq, req);
+ mmc_blk_mq_dec_in_flight(mq, issue_type);
}
void mmc_blk_mq_recovery(struct mmc_queue *mq)
@@ -2152,7 +2236,7 @@ void mmc_blk_mq_recovery(struct mmc_queue *mq)
static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
struct request **prev_req)
{
- if (mmc_host_done_complete(mq->card->host))
+ if (mmc_host_can_done_complete(mq->card->host))
return;
mutex_lock(&mq->complete_lock);
@@ -2191,7 +2275,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
struct mmc_host *host = mq->card->host;
unsigned long flags;
- if (!mmc_host_done_complete(host)) {
+ if (!mmc_host_can_done_complete(host)) {
bool waiting;
/*
@@ -2304,7 +2388,7 @@ static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
mq->rw_wait = false;
/* Release re-tuning here where there is no synchronization required */
- if (err || mmc_host_done_complete(host))
+ if (err || mmc_host_can_done_complete(host))
mmc_retune_release(host);
out_post_req:
@@ -2370,8 +2454,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
}
ret = mmc_blk_cqe_issue_flush(mq, req);
break;
- case REQ_OP_READ:
case REQ_OP_WRITE:
+ card->written_flag = true;
+ fallthrough;
+ case REQ_OP_READ:
if (host->cqe_enabled)
ret = mmc_blk_cqe_issue_rw_rq(mq, req);
else
@@ -2396,6 +2482,56 @@ static inline int mmc_blk_readonly(struct mmc_card *card)
!(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
+/*
+ * Search for a declared partitions node for the disk in mmc-card related node.
+ *
+ * This is to permit support for partition table defined in DT in special case
+ * where a partition table is not written in the disk and is expected to be
+ * passed from the running system.
+ *
+ * For the user disk, "partitions" node is searched.
+ * For the special HW disk, "partitions-" node with the appended name is used
+ * following this conversion table (to adhere to JEDEC naming)
+ * - boot0 -> partitions-boot1
+ * - boot1 -> partitions-boot2
+ * - gp0 -> partitions-gp1
+ * - gp1 -> partitions-gp2
+ * - gp2 -> partitions-gp3
+ * - gp3 -> partitions-gp4
+ */
+static struct fwnode_handle *mmc_blk_get_partitions_node(struct device *mmc_dev,
+ const char *subname)
+{
+ const char *node_name = "partitions";
+
+ if (subname) {
+ mmc_dev = mmc_dev->parent;
+
+ /*
+ * Check if we are allocating a boot0/1 BOOT disk.
+ * In DT we use the JEDEC naming boot1/2.
+ */
+ if (!strcmp(subname, "boot0"))
+ node_name = "partitions-boot1";
+ if (!strcmp(subname, "boot1"))
+ node_name = "partitions-boot2";
+ /*
+ * Check if we are allocating a gp0/1/2/3 GP disk.
+ * In DT we use the JEDEC naming gp1/2/3/4.
+ */
+ if (!strcmp(subname, "gp0"))
+ node_name = "partitions-gp1";
+ if (!strcmp(subname, "gp1"))
+ node_name = "partitions-gp2";
+ if (!strcmp(subname, "gp2"))
+ node_name = "partitions-gp3";
+ if (!strcmp(subname, "gp3"))
+ node_name = "partitions-gp4";
+ }
+
+ return device_get_named_child_node(mmc_dev, node_name);
+}
+
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct device *parent,
sector_t size,
@@ -2404,13 +2540,13 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
int area_type,
unsigned int part_type)
{
+ struct fwnode_handle *disk_fwnode;
struct mmc_blk_data *md;
int devidx, ret;
char cap_str[10];
- bool cache_enabled = false;
- bool fua_enabled = false;
+ unsigned int features = 0;
- devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
+ devidx = ida_alloc_max(&mmc_blk_ida, max_devices - 1, GFP_KERNEL);
if (devidx < 0) {
/*
* We get -ENOSPC because there are no more available
@@ -2426,7 +2562,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
return ERR_PTR(devidx);
}
- md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
+ md = kzalloc(sizeof(*md), GFP_KERNEL);
if (!md) {
ret = -ENOMEM;
goto out;
@@ -2440,7 +2576,19 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
*/
md->read_only = mmc_blk_readonly(card);
- md->disk = mmc_init_queue(&md->queue, card);
+ if (mmc_host_can_cmd23(card->host) && mmc_card_can_cmd23(card))
+ md->flags |= MMC_BLK_CMD23;
+
+ if (md->flags & MMC_BLK_CMD23 &&
+ ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
+ card->ext_csd.rel_sectors)) {
+ md->flags |= MMC_BLK_REL_WR;
+ features |= (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
+ } else if (mmc_cache_enabled(card->host)) {
+ features |= BLK_FEAT_WRITE_CACHE;
+ }
+
+ md->disk = mmc_init_queue(&md->queue, card, features);
if (IS_ERR(md->disk)) {
ret = PTR_ERR(md->disk);
goto err_kfree;
@@ -2460,7 +2608,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->disk->private_data = md;
md->parent = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
- if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
+ if (area_type & MMC_BLK_DATA_AREA_RPMB)
md->disk->flags |= GENHD_FL_NO_PART;
/*
@@ -2480,36 +2628,18 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
set_capacity(md->disk, size);
- if (mmc_host_cmd23(card->host)) {
- if ((mmc_card_mmc(card) &&
- card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
- (mmc_card_sd(card) &&
- card->scr.cmds & SD_SCR_CMD23_SUPPORT))
- md->flags |= MMC_BLK_CMD23;
- }
-
- if (md->flags & MMC_BLK_CMD23 &&
- ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
- card->ext_csd.rel_sectors)) {
- md->flags |= MMC_BLK_REL_WR;
- fua_enabled = true;
- cache_enabled = true;
- }
- if (mmc_cache_enabled(card->host))
- cache_enabled = true;
-
- blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled);
-
string_get_size((u64)size, 512, STRING_UNITS_2,
cap_str, sizeof(cap_str));
- pr_info("%s: %s %s %s %s\n",
+ pr_info("%s: %s %s %s%s\n",
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
- cap_str, md->read_only ? "(ro)" : "");
+ cap_str, md->read_only ? " (ro)" : "");
/* used in ->open, must be set before add_disk: */
if (area_type == MMC_BLK_DATA_AREA_MAIN)
dev_set_drvdata(&card->dev, md);
- ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
+ disk_fwnode = mmc_blk_get_partitions_node(parent, subname);
+ ret = add_disk_fwnode(md->parent, md->disk, mmc_disk_attr_groups,
+ disk_fwnode);
if (ret)
goto err_put_disk;
return md;
@@ -2520,7 +2650,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
err_kfree:
kfree(md);
out:
- ida_simple_remove(&mmc_blk_ida, devidx);
+ ida_free(&mmc_blk_ida, devidx);
return ERR_PTR(ret);
}
@@ -2615,7 +2745,6 @@ static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
get_device(&rpmb->dev);
filp->private_data = rpmb;
- mmc_blk_get(rpmb->md->disk);
return nonseekable_open(inode, filp);
}
@@ -2625,7 +2754,6 @@ static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
struct mmc_rpmb_data, chrdev);
- mmc_blk_put(rpmb->md);
put_device(&rpmb->dev);
return 0;
@@ -2635,7 +2763,6 @@ static const struct file_operations mmc_rpmb_fileops = {
.release = mmc_rpmb_chrdev_release,
.open = mmc_rpmb_chrdev_open,
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = mmc_rpmb_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mmc_rpmb_ioctl_compat,
@@ -2646,10 +2773,159 @@ static void mmc_blk_rpmb_device_release(struct device *dev)
{
struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
- ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
+ rpmb_dev_unregister(rpmb->rdev);
+ mmc_blk_put(rpmb->md);
+ ida_free(&mmc_rpmb_ida, rpmb->id);
kfree(rpmb);
}
+static void free_idata(struct mmc_blk_ioc_data **idata, unsigned int cmd_count)
+{
+ unsigned int n;
+
+ for (n = 0; n < cmd_count; n++)
+ kfree(idata[n]);
+ kfree(idata);
+}
+
+static struct mmc_blk_ioc_data **alloc_idata(struct mmc_rpmb_data *rpmb,
+ unsigned int cmd_count)
+{
+ struct mmc_blk_ioc_data **idata;
+ unsigned int n;
+
+ idata = kcalloc(cmd_count, sizeof(*idata), GFP_KERNEL);
+ if (!idata)
+ return NULL;
+
+ for (n = 0; n < cmd_count; n++) {
+ idata[n] = kcalloc(1, sizeof(**idata), GFP_KERNEL);
+ if (!idata[n]) {
+ free_idata(idata, n);
+ return NULL;
+ }
+ idata[n]->rpmb = rpmb;
+ }
+
+ return idata;
+}
+
+static void set_idata(struct mmc_blk_ioc_data *idata, u32 opcode,
+ int write_flag, u8 *buf, unsigned int buf_bytes)
+{
+ /*
+ * The size of an RPMB frame must match what's expected by the
+ * hardware.
+ */
+ static_assert(sizeof(struct rpmb_frame) == 512, "RPMB frame size must be 512 bytes");
+
+ idata->ic.opcode = opcode;
+ idata->ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+ idata->ic.write_flag = write_flag;
+ idata->ic.blksz = RPMB_FRAME_SIZE;
+ idata->ic.blocks = buf_bytes / idata->ic.blksz;
+ idata->buf = buf;
+ idata->buf_bytes = buf_bytes;
+}
+
+static int mmc_route_rpmb_frames(struct device *dev, u8 *req,
+ unsigned int req_len, u8 *resp,
+ unsigned int resp_len)
+{
+ struct rpmb_frame *frm = (struct rpmb_frame *)req;
+ struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
+ struct mmc_blk_data *md = rpmb->md;
+ struct mmc_blk_ioc_data **idata;
+ struct mmc_queue_req *mq_rq;
+ unsigned int cmd_count;
+ struct request *rq;
+ u16 req_type;
+ bool write;
+ int ret;
+
+ if (IS_ERR(md->queue.card))
+ return PTR_ERR(md->queue.card);
+
+ if (req_len < RPMB_FRAME_SIZE)
+ return -EINVAL;
+
+ req_type = be16_to_cpu(frm->req_resp);
+ switch (req_type) {
+ case RPMB_PROGRAM_KEY:
+ if (CHECK_SIZE_NEQ(req_len) || CHECK_SIZE_NEQ(resp_len))
+ return -EINVAL;
+ write = true;
+ break;
+ case RPMB_GET_WRITE_COUNTER:
+ if (CHECK_SIZE_NEQ(req_len) || CHECK_SIZE_NEQ(resp_len))
+ return -EINVAL;
+ write = false;
+ break;
+ case RPMB_WRITE_DATA:
+ if (!CHECK_SIZE_ALIGNED(req_len) || CHECK_SIZE_NEQ(resp_len))
+ return -EINVAL;
+ write = true;
+ break;
+ case RPMB_READ_DATA:
+ if (CHECK_SIZE_NEQ(req_len) || !CHECK_SIZE_ALIGNED(resp_len))
+ return -EINVAL;
+ write = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Write operations require 3 commands, read operations require 2 */
+ cmd_count = write ? 3 : 2;
+
+ idata = alloc_idata(rpmb, cmd_count);
+ if (!idata)
+ return -ENOMEM;
+
+ if (write) {
+ struct rpmb_frame *resp_frm = (struct rpmb_frame *)resp;
+
+ /* Send write request frame(s) */
+ set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK,
+ 1 | MMC_CMD23_ARG_REL_WR, req, req_len);
+
+ /* Send result request frame */
+ memset(resp_frm, 0, RPMB_FRAME_SIZE);
+ resp_frm->req_resp = cpu_to_be16(RPMB_RESULT_READ);
+ set_idata(idata[1], MMC_WRITE_MULTIPLE_BLOCK, 1, resp,
+ resp_len);
+
+ /* Read response frame */
+ set_idata(idata[2], MMC_READ_MULTIPLE_BLOCK, 0, resp, resp_len);
+ } else {
+ /* Send write request frame(s) */
+ set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK, 1, req, req_len);
+
+ /* Read response frame */
+ set_idata(idata[1], MMC_READ_MULTIPLE_BLOCK, 0, resp, resp_len);
+ }
+
+ rq = blk_mq_alloc_request(md->queue.queue, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto out;
+ }
+
+ mq_rq = req_to_mmc_queue_req(rq);
+ mq_rq->drv_op = MMC_DRV_OP_IOCTL_RPMB;
+ mq_rq->drv_op_result = -EIO;
+ mq_rq->drv_op_data = idata;
+ mq_rq->ioc_count = cmd_count;
+ blk_execute_rq(rq, false);
+ ret = req_to_mmc_queue_req(rq)->drv_op_result;
+
+ blk_mq_free_request(rq);
+
+out:
+ free_idata(idata, cmd_count);
+ return ret;
+}
+
static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
struct mmc_blk_data *md,
unsigned int part_index,
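A minimal sketch of how an in-kernel consumer could use the rpmb_dev registered further down, assuming rpmb_route_frames() from <linux/rpmb.h>; the write_counter field name of struct rpmb_frame follows the usual RPMB frame layout and is an assumption here, and a real caller must also fill a nonce and verify the MAC in the response.

static int example_read_write_counter(struct rpmb_dev *rdev, u32 *counter)
{
	struct rpmb_frame *req, *resp;
	int ret = -ENOMEM;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!req || !resp)
		goto out;

	/* One request frame, one response frame, routed as above. */
	req->req_resp = cpu_to_be16(RPMB_GET_WRITE_COUNTER);

	ret = rpmb_route_frames(rdev, (u8 *)req, sizeof(*req),
				(u8 *)resp, sizeof(*resp));
	if (!ret)
		*counter = be32_to_cpu(resp->write_counter);
out:
	kfree(req);
	kfree(resp);
	return ret;
}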
@@ -2662,13 +2938,13 @@ static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
struct mmc_rpmb_data *rpmb;
/* This creates the minor number for the RPMB char device */
- devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
+ devidx = ida_alloc_max(&mmc_rpmb_ida, max_devices - 1, GFP_KERNEL);
if (devidx < 0)
return devidx;
rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
if (!rpmb) {
- ida_simple_remove(&mmc_rpmb_ida, devidx);
+ ida_free(&mmc_rpmb_ida, devidx);
return -ENOMEM;
}
@@ -2684,6 +2960,7 @@ static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
rpmb->dev.release = mmc_blk_rpmb_device_release;
device_initialize(&rpmb->dev);
dev_set_drvdata(&rpmb->dev, rpmb);
+ mmc_blk_get(md->disk);
rpmb->md = md;
cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
@@ -2806,6 +3083,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
if (IS_ERR(req))
return PTR_ERR(req);
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
blk_execute_rq(req, false);
ret = req_to_mmc_queue_req(req)->drv_op_result;
if (ret >= 0) {
@@ -2844,6 +3122,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
goto out_free;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
blk_execute_rq(req, false);
err = req_to_mmc_queue_req(req)->drv_op_result;
@@ -2894,12 +3173,12 @@ static const struct file_operations mmc_dbg_ext_csd_fops = {
.llseek = default_llseek,
};
-static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
+static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
{
struct dentry *root;
if (!card->debugfs_root)
- return 0;
+ return;
root = card->debugfs_root;
@@ -2908,19 +3187,13 @@ static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
debugfs_create_file_unsafe("status", 0400, root,
card,
&mmc_dbg_card_status_fops);
- if (!md->status_dentry)
- return -EIO;
}
if (mmc_card_mmc(card)) {
md->ext_csd_dentry =
- debugfs_create_file("ext_csd", S_IRUSR, root, card,
+ debugfs_create_file("ext_csd", 0400, root, card,
&mmc_dbg_ext_csd_fops);
- if (!md->ext_csd_dentry)
- return -EIO;
}
-
- return 0;
}
static void mmc_blk_remove_debugfs(struct mmc_card *card,
@@ -2929,22 +3202,17 @@ static void mmc_blk_remove_debugfs(struct mmc_card *card,
if (!card->debugfs_root)
return;
- if (!IS_ERR_OR_NULL(md->status_dentry)) {
- debugfs_remove(md->status_dentry);
- md->status_dentry = NULL;
- }
+ debugfs_remove(md->status_dentry);
+ md->status_dentry = NULL;
- if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) {
- debugfs_remove(md->ext_csd_dentry);
- md->ext_csd_dentry = NULL;
- }
+ debugfs_remove(md->ext_csd_dentry);
+ md->ext_csd_dentry = NULL;
}
#else
-static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
+static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
{
- return 0;
}
static void mmc_blk_remove_debugfs(struct mmc_card *card,
@@ -2954,6 +3222,42 @@ static void mmc_blk_remove_debugfs(struct mmc_card *card,
#endif /* CONFIG_DEBUG_FS */
+static void mmc_blk_rpmb_add(struct mmc_card *card)
+{
+ struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+ struct mmc_rpmb_data *rpmb;
+ struct rpmb_dev *rdev;
+ unsigned int n;
+ u32 cid[4];
+ struct rpmb_descr descr = {
+ .type = RPMB_TYPE_EMMC,
+ .route_frames = mmc_route_rpmb_frames,
+ .reliable_wr_count = card->ext_csd.enhanced_rpmb_supported ?
+ 2 : 32,
+ .capacity = card->ext_csd.raw_rpmb_size_mult,
+ .dev_id = (void *)cid,
+ .dev_id_len = sizeof(cid),
+ };
+
+ /*
+ * Provide CID as an octet array. The CID needs to be interpreted
+ * when used as input to derive the RPMB key since some fields
+ * will change due to firmware updates.
+ */
+ for (n = 0; n < 4; n++)
+ cid[n] = be32_to_cpu((__force __be32)card->raw_cid[n]);
+
+ list_for_each_entry(rpmb, &md->rpmbs, node) {
+ rdev = rpmb_dev_register(&rpmb->dev, &descr);
+ if (IS_ERR(rdev)) {
+ pr_warn("%s: could not register RPMB device\n",
+ dev_name(&rpmb->dev));
+ continue;
+ }
+ rpmb->rdev = rdev;
+ }
+}
+
static int mmc_blk_probe(struct mmc_card *card)
{
struct mmc_blk_data *md;
@@ -2968,7 +3272,8 @@ static int mmc_blk_probe(struct mmc_card *card)
mmc_fixup_device(card, mmc_blk_fixups);
card->complete_wq = alloc_workqueue("mmc_complete",
- WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_PERCPU,
+ 0);
if (!card->complete_wq) {
pr_err("Failed to create mmc completion workqueue");
return -ENOMEM;
@@ -2999,6 +3304,8 @@ static int mmc_blk_probe(struct mmc_card *card)
pm_runtime_enable(&card->dev);
}
+ mmc_blk_rpmb_add(card);
+
return 0;
out:
@@ -3025,7 +3332,6 @@ static void mmc_blk_remove(struct mmc_card *card)
pm_runtime_disable(&card->dev);
pm_runtime_put_noidle(&card->dev);
mmc_blk_remove_req(md);
- dev_set_drvdata(&card->dev, NULL);
destroy_workqueue(card->complete_wq);
}
@@ -3140,4 +3446,3 @@ module_exit(mmc_blk_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
-