Diffstat (limited to 'drivers/mmc')
-rw-r--r-- drivers/mmc/core/Kconfig | 4
-rw-r--r-- drivers/mmc/core/Makefile | 2
-rw-r--r-- drivers/mmc/core/block.c | 1075
-rw-r--r-- drivers/mmc/core/bus.c | 99
-rw-r--r-- drivers/mmc/core/bus.h | 7
-rw-r--r-- drivers/mmc/core/card.h | 101
-rw-r--r-- drivers/mmc/core/core.c | 280
-rw-r--r-- drivers/mmc/core/core.h | 51
-rw-r--r-- drivers/mmc/core/crypto.c | 20
-rw-r--r-- drivers/mmc/core/debugfs.c | 144
-rw-r--r-- drivers/mmc/core/host.c | 123
-rw-r--r-- drivers/mmc/core/host.h | 14
-rw-r--r-- drivers/mmc/core/mmc.c | 419
-rw-r--r-- drivers/mmc/core/mmc_ops.c | 215
-rw-r--r-- drivers/mmc/core/mmc_ops.h | 23
-rw-r--r-- drivers/mmc/core/mmc_test.c | 116
-rw-r--r-- drivers/mmc/core/pwrseq.c | 2
-rw-r--r-- drivers/mmc/core/pwrseq_emmc.c | 5
-rw-r--r-- drivers/mmc/core/pwrseq_sd8787.c | 37
-rw-r--r-- drivers/mmc/core/pwrseq_simple.c | 60
-rw-r--r-- drivers/mmc/core/queue.c | 166
-rw-r--r-- drivers/mmc/core/queue.h | 3
-rw-r--r-- drivers/mmc/core/quirks.h | 128
-rw-r--r-- drivers/mmc/core/regulator.c | 163
-rw-r--r-- drivers/mmc/core/sd.c | 380
-rw-r--r-- drivers/mmc/core/sd.h | 6
-rw-r--r-- drivers/mmc/core/sd_ops.c | 109
-rw-r--r-- drivers/mmc/core/sd_ops.h | 5
-rw-r--r-- drivers/mmc/core/sd_uhs2.c | 1304
-rw-r--r-- drivers/mmc/core/sdio.c | 70
-rw-r--r-- drivers/mmc/core/sdio_bus.c | 61
-rw-r--r-- drivers/mmc/core/sdio_cis.c | 34
-rw-r--r-- drivers/mmc/core/sdio_io.c | 2
-rw-r--r-- drivers/mmc/core/sdio_irq.c | 4
-rw-r--r-- drivers/mmc/core/sdio_uart.c | 85
-rw-r--r-- drivers/mmc/core/slot-gpio.c | 108
-rw-r--r-- drivers/mmc/host/Kconfig | 221
-rw-r--r-- drivers/mmc/host/Makefile | 13
-rw-r--r-- drivers/mmc/host/alcor.c | 32
-rw-r--r-- drivers/mmc/host/atmel-mci.c | 504
-rw-r--r-- drivers/mmc/host/au1xmmc.c | 89
-rw-r--r-- drivers/mmc/host/bcm2835.c | 87
-rw-r--r-- drivers/mmc/host/cavium-octeon.c | 8
-rw-r--r-- drivers/mmc/host/cavium-thunderx.c | 9
-rw-r--r-- drivers/mmc/host/cavium.c | 10
-rw-r--r-- drivers/mmc/host/cb710-mmc.c | 44
-rw-r--r-- drivers/mmc/host/cb710-mmc.h | 3
-rw-r--r-- drivers/mmc/host/cqhci-core.c | 83
-rw-r--r-- drivers/mmc/host/cqhci-crypto.c | 73
-rw-r--r-- drivers/mmc/host/cqhci-crypto.h | 7
-rw-r--r-- drivers/mmc/host/cqhci.h | 10
-rw-r--r-- drivers/mmc/host/davinci_mmc.c | 157
-rw-r--r-- drivers/mmc/host/dw_mmc-bluefield.c | 18
-rw-r--r-- drivers/mmc/host/dw_mmc-exynos.c | 195
-rw-r--r-- drivers/mmc/host/dw_mmc-hi3798cv200.c | 15
-rw-r--r-- drivers/mmc/host/dw_mmc-hi3798mv200.c | 246
-rw-r--r-- drivers/mmc/host/dw_mmc-k3.c | 9
-rw-r--r-- drivers/mmc/host/dw_mmc-pci.c | 9
-rw-r--r-- drivers/mmc/host/dw_mmc-pltfm.c | 47
-rw-r--r-- drivers/mmc/host/dw_mmc-pltfm.h | 2
-rw-r--r-- drivers/mmc/host/dw_mmc-rockchip.c | 273
-rw-r--r-- drivers/mmc/host/dw_mmc-starfive.c | 129
-rw-r--r-- drivers/mmc/host/dw_mmc.c | 392
-rw-r--r-- drivers/mmc/host/dw_mmc.h | 63
-rw-r--r-- drivers/mmc/host/jz4740_mmc.c | 176
-rw-r--r-- drivers/mmc/host/litex_mmc.c | 652
-rw-r--r-- drivers/mmc/host/loongson2-mmc.c | 1030
-rw-r--r-- drivers/mmc/host/meson-gx-mmc.c | 328
-rw-r--r-- drivers/mmc/host/meson-mx-sdhc-clkc.c | 43
-rw-r--r-- drivers/mmc/host/meson-mx-sdhc-mmc.c | 79
-rw-r--r-- drivers/mmc/host/meson-mx-sdio.c | 355
-rw-r--r-- drivers/mmc/host/mmc_hsq.c | 64
-rw-r--r-- drivers/mmc/host/mmc_hsq.h | 16
-rw-r--r-- drivers/mmc/host/mmc_spi.c | 285
-rw-r--r-- drivers/mmc/host/mmci.c | 393
-rw-r--r-- drivers/mmc/host/mmci.h | 31
-rw-r--r-- drivers/mmc/host/mmci_stm32_sdmmc.c | 304
-rw-r--r-- drivers/mmc/host/moxart-mmc.c | 115
-rw-r--r-- drivers/mmc/host/mtk-sd.c | 1626
-rw-r--r-- drivers/mmc/host/mvsdio.c | 40
-rw-r--r-- drivers/mmc/host/mxcmmc.c | 122
-rw-r--r-- drivers/mmc/host/mxs-mmc.c | 50
-rw-r--r-- drivers/mmc/host/of_mmc_spi.c | 5
-rw-r--r-- drivers/mmc/host/omap.c | 191
-rw-r--r-- drivers/mmc/host/omap_hsmmc.c | 100
-rw-r--r-- drivers/mmc/host/owl-mmc.c | 49
-rw-r--r-- drivers/mmc/host/pxamci.c | 101
-rw-r--r-- drivers/mmc/host/renesas_sdhi.h | 44
-rw-r--r-- drivers/mmc/host/renesas_sdhi_core.c | 482
-rw-r--r-- drivers/mmc/host/renesas_sdhi_internal_dmac.c | 350
-rw-r--r-- drivers/mmc/host/renesas_sdhi_sys_dmac.c | 36
-rw-r--r-- drivers/mmc/host/rtsx_pci_sdmmc.c | 123
-rw-r--r-- drivers/mmc/host/rtsx_usb_sdmmc.c | 107
-rw-r--r-- drivers/mmc/host/s3cmci.c | 1777
-rw-r--r-- drivers/mmc/host/s3cmci.h | 75
-rw-r--r-- drivers/mmc/host/sdhci-acpi.c | 215
-rw-r--r-- drivers/mmc/host/sdhci-bcm-kona.c | 33
-rw-r--r-- drivers/mmc/host/sdhci-brcmstb.c | 440
-rw-r--r-- drivers/mmc/host/sdhci-cadence.c | 288
-rw-r--r-- drivers/mmc/host/sdhci-cns3xxx.c | 113
-rw-r--r-- drivers/mmc/host/sdhci-cqhci.h | 24
-rw-r--r-- drivers/mmc/host/sdhci-dove.c | 20
-rw-r--r-- drivers/mmc/host/sdhci-esdhc-imx.c | 688
-rw-r--r-- drivers/mmc/host/sdhci-esdhc-mcf.c | 43
-rw-r--r-- drivers/mmc/host/sdhci-iproc.c | 78
-rw-r--r-- drivers/mmc/host/sdhci-milbeaut.c | 27
-rw-r--r-- drivers/mmc/host/sdhci-msm.c | 517
-rw-r--r-- drivers/mmc/host/sdhci-npcm.c | 85
-rw-r--r-- drivers/mmc/host/sdhci-of-arasan.c | 472
-rw-r--r-- drivers/mmc/host/sdhci-of-aspeed-test.c | 8
-rw-r--r-- drivers/mmc/host/sdhci-of-aspeed.c | 61
-rw-r--r-- drivers/mmc/host/sdhci-of-at91.c | 95
-rw-r--r-- drivers/mmc/host/sdhci-of-dwcmshc.c | 1906
-rw-r--r-- drivers/mmc/host/sdhci-of-esdhc.c | 81
-rw-r--r-- drivers/mmc/host/sdhci-of-hlwd.c | 4
-rw-r--r-- drivers/mmc/host/sdhci-of-k1.c | 308
-rw-r--r-- drivers/mmc/host/sdhci-of-ma35d1.c | 307
-rw-r--r-- drivers/mmc/host/sdhci-of-sparx5.c | 35
-rw-r--r-- drivers/mmc/host/sdhci-omap.c | 370
-rw-r--r-- drivers/mmc/host/sdhci-pci-core.c | 418
-rw-r--r-- drivers/mmc/host/sdhci-pci-data.c | 6
-rw-r--r-- drivers/mmc/host/sdhci-pci-gli.c | 1279
-rw-r--r-- drivers/mmc/host/sdhci-pci-o2micro.c | 303
-rw-r--r-- drivers/mmc/host/sdhci-pci.h | 16
-rw-r--r-- drivers/mmc/host/sdhci-pic32.c | 13
-rw-r--r-- drivers/mmc/host/sdhci-pltfm.c | 74
-rw-r--r-- drivers/mmc/host/sdhci-pltfm.h | 9
-rw-r--r-- drivers/mmc/host/sdhci-pxav2.c | 178
-rw-r--r-- drivers/mmc/host/sdhci-pxav3.c | 78
-rw-r--r-- drivers/mmc/host/sdhci-s3c.c | 82
-rw-r--r-- drivers/mmc/host/sdhci-spear.c | 29
-rw-r--r-- drivers/mmc/host/sdhci-sprd.c | 276
-rw-r--r-- drivers/mmc/host/sdhci-st.c | 26
-rw-r--r-- drivers/mmc/host/sdhci-tegra.c | 238
-rw-r--r-- drivers/mmc/host/sdhci-uhs2.c | 1251
-rw-r--r-- drivers/mmc/host/sdhci-uhs2.h | 188
-rw-r--r-- drivers/mmc/host/sdhci-xenon-phy.c | 48
-rw-r--r-- drivers/mmc/host/sdhci-xenon.c | 84
-rw-r--r-- drivers/mmc/host/sdhci-xenon.h | 3
-rw-r--r-- drivers/mmc/host/sdhci.c | 742
-rw-r--r-- drivers/mmc/host/sdhci.h | 139
-rw-r--r-- drivers/mmc/host/sdhci_am654.c | 533
-rw-r--r-- drivers/mmc/host/sdhci_f_sdh30.c | 113
-rw-r--r-- drivers/mmc/host/sdhci_f_sdh30.h | 3
-rw-r--r-- drivers/mmc/host/sdricoh_cs.c | 18
-rw-r--r-- drivers/mmc/host/sh_mmcif.c | 172
-rw-r--r-- drivers/mmc/host/sunplus-mmc.c | 997
-rw-r--r-- drivers/mmc/host/sunxi-mmc.c | 71
-rw-r--r-- drivers/mmc/host/tifm_sd.c | 74
-rw-r--r-- drivers/mmc/host/tmio_mmc.c | 227
-rw-r--r-- drivers/mmc/host/tmio_mmc.h | 60
-rw-r--r-- drivers/mmc/host/tmio_mmc_core.c | 155
-rw-r--r-- drivers/mmc/host/toshsd.c | 18
-rw-r--r-- drivers/mmc/host/uniphier-sd.c | 124
-rw-r--r-- drivers/mmc/host/usdhi6rol0.c | 61
-rw-r--r-- drivers/mmc/host/ushc.c | 4
-rw-r--r-- drivers/mmc/host/via-sdmmc.c | 58
-rw-r--r-- drivers/mmc/host/vub300.c | 79
-rw-r--r-- drivers/mmc/host/wbsd.c | 111
-rw-r--r-- drivers/mmc/host/wbsd.h | 10
-rw-r--r-- drivers/mmc/host/wmt-sdmmc.c | 57
161 files changed, 22674 insertions, 9291 deletions
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index ae8b69aee619..14d2ecbb04d3 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -15,7 +15,7 @@ config PWRSEQ_EMMC
config PWRSEQ_SD8787
tristate "HW reset support for SD8787 BT + Wifi module"
- depends on OF && (MWIFIEX || BT_MRVL_SDIO || LIBERTAS_SDIO)
+ depends on OF && (MWIFIEX != n || BT_MRVL_SDIO != n || LIBERTAS_SDIO != n || WILC1000_SDIO != n)
help
This selects hardware reset support for the SD8787 BT + Wifi
module. By default this option is set to n.
@@ -37,6 +37,8 @@ config PWRSEQ_SIMPLE
config MMC_BLOCK
tristate "MMC block device driver"
depends on BLOCK
+ depends on RPMB || !RPMB
+ imply IOSCHED_BFQ
default y
help
Say Y here to enable the MMC block device driver support.
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 6a907736cd7a..15b067e8b0d1 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_MMC) += mmc_core.o
mmc_core-y := core.o bus.o host.o \
mmc.o mmc_ops.o sd.o sd_ops.o \
sdio.o sdio_ops.o sdio_bus.o \
- sdio_cis.o sdio_io.o sdio_irq.o \
+ sdio_cis.o sdio_io.o sdio_irq.o sd_uhs2.o\
slot-gpio.o regulator.o
mmc_core-$(CONFIG_OF) += pwrseq.o
obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 9890a1532cb0..fb6eb2d79b4f 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -28,10 +28,12 @@
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
+#include <linux/kref.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
+#include <linux/string.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
@@ -39,6 +41,7 @@
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <linux/debugfs.h>
+#include <linux/rpmb.h>
#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
@@ -47,6 +50,7 @@
#include <linux/mmc/sd.h>
#include <linux/uaccess.h>
+#include <linux/unaligned.h>
#include "queue.h"
#include "block.h"
@@ -75,8 +79,10 @@ MODULE_ALIAS("mmc:block");
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
-#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
- (rq_data_dir(req) == WRITE))
+#define RPMB_FRAME_SIZE sizeof(struct rpmb_frame)
+#define CHECK_SIZE_NEQ(val) ((val) != sizeof(struct rpmb_frame))
+#define CHECK_SIZE_ALIGNED(val) IS_ALIGNED((val), sizeof(struct rpmb_frame))
+
static DEFINE_MUTEX(block_mutex);
/*
@@ -97,6 +103,11 @@ static int max_devices;
static DEFINE_IDA(mmc_blk_ida);
static DEFINE_IDA(mmc_rpmb_ida);
+struct mmc_blk_busy_data {
+ struct mmc_card *card;
+ u32 status;
+};
+
/*
* There is one mmc_blk_data per slot.
*/
@@ -111,7 +122,7 @@ struct mmc_blk_data {
#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
- unsigned int usage;
+ struct kref kref;
unsigned int read_only;
unsigned int part_type;
unsigned int reset_done;
@@ -120,6 +131,7 @@ struct mmc_blk_data {
#define MMC_BLK_DISCARD BIT(2)
#define MMC_BLK_SECDISCARD BIT(3)
#define MMC_BLK_CQE_RECOVERY BIT(4)
+#define MMC_BLK_TRIM BIT(5)
/*
* Only set in main mmc_blk_data associated
@@ -127,8 +139,7 @@ struct mmc_blk_data {
* track of the current selected device partition.
*/
unsigned int part_curr;
- struct device_attribute force_ro;
- struct device_attribute power_ro_lock;
+#define MMC_BLK_PART_INVALID UINT_MAX /* Unknown partition active */
int area_type;
/* debugfs files (only in main mmc_blk_data) */
@@ -140,7 +151,7 @@ struct mmc_blk_data {
static dev_t mmc_rpmb_devt;
/* Bus type for RPMB character devices */
-static struct bus_type mmc_rpmb_bus_type = {
+static const struct bus_type mmc_rpmb_bus_type = {
.name = "mmc_rpmb",
};
@@ -151,6 +162,7 @@ static struct bus_type mmc_rpmb_bus_type = {
* @id: unique device ID number
* @part_index: partition index (0 on first)
* @md: parent MMC block device
+ * @rdev: registered RPMB device
* @node: list item, so we can put this device on a list
*/
struct mmc_rpmb_data {
@@ -159,6 +171,7 @@ struct mmc_rpmb_data {
int id;
unsigned int part_index;
struct mmc_blk_data *md;
+ struct rpmb_dev *rdev;
struct list_head node;
};
@@ -171,9 +184,11 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
unsigned int part_type);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
- int disable_multi,
+ int recovery_mode,
struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
+static int mmc_spi_err_check(struct mmc_card *card);
+static int mmc_blk_busy_cb(void *cb_data, bool *busy);
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
@@ -181,10 +196,8 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
mutex_lock(&open_lock);
md = disk->private_data;
- if (md && md->usage == 0)
+ if (md && !kref_get_unless_zero(&md->kref))
md = NULL;
- if (md)
- md->usage++;
mutex_unlock(&open_lock);
return md;
@@ -196,18 +209,25 @@ static inline int mmc_get_devidx(struct gendisk *disk)
return devidx;
}
-static void mmc_blk_put(struct mmc_blk_data *md)
+static void mmc_blk_kref_release(struct kref *ref)
{
- mutex_lock(&open_lock);
- md->usage--;
- if (md->usage == 0) {
- int devidx = mmc_get_devidx(md->disk);
+ struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref);
+ int devidx;
- ida_simple_remove(&mmc_blk_ida, devidx);
- put_disk(md->disk);
- kfree(md);
- }
+ devidx = mmc_get_devidx(md->disk);
+ ida_free(&mmc_blk_ida, devidx);
+
+ mutex_lock(&open_lock);
+ md->disk->private_data = NULL;
mutex_unlock(&open_lock);
+
+ put_disk(md->disk);
+ kfree(md);
+}
+
+static void mmc_blk_put(struct mmc_blk_data *md)
+{
+ kref_put(&md->kref, mmc_blk_kref_release);
}
static ssize_t power_ro_lock_show(struct device *dev,
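The hunk above replaces the open-coded usage counter with a kref. For reference, a minimal self-contained sketch of the pattern being adopted (illustrative code, not taken from this patch; names are hypothetical): lookups succeed only while the count is non-zero, and the release callback runs exactly once when the final reference is dropped.

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct obj {
	struct kref kref;
	/* ... payload ... */
};

static DEFINE_MUTEX(lookup_lock);
static struct obj *registry;	/* hypothetical single-slot registry */

static void obj_release(struct kref *ref)
{
	struct obj *obj = container_of(ref, struct obj, kref);

	mutex_lock(&lookup_lock);
	registry = NULL;	/* make it unreachable for new lookups */
	mutex_unlock(&lookup_lock);
	kfree(obj);
}

static struct obj *obj_get(void)
{
	struct obj *obj;

	mutex_lock(&lookup_lock);
	obj = registry;
	/* Fails if a concurrent put already dropped the last reference. */
	if (obj && !kref_get_unless_zero(&obj->kref))
		obj = NULL;
	mutex_unlock(&lookup_lock);
	return obj;
}

static void obj_put(struct obj *obj)
{
	kref_put(&obj->kref, obj_release);
}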
@@ -223,7 +243,7 @@ static ssize_t power_ro_lock_show(struct device *dev,
else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
locked = 1;
- ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
+ ret = sysfs_emit(buf, "%d\n", locked);
mmc_blk_put(md);
@@ -249,15 +269,16 @@ static ssize_t power_ro_lock_store(struct device *dev,
mq = &md->queue;
/* Dispatch locking to the block layer */
- req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, 0);
+ req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req)) {
count = PTR_ERR(req);
goto out_put;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
- blk_execute_rq(NULL, req, 0);
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
+ blk_execute_rq(req, false);
ret = req_to_mmc_queue_req(req)->drv_op_result;
- blk_put_request(req);
+ blk_mq_free_request(req);
if (!ret) {
pr_info("%s: Locking boot partition ro until next power on\n",
@@ -275,15 +296,18 @@ out_put:
return count;
}
+static DEVICE_ATTR(ro_lock_until_next_power_on, 0,
+ power_ro_lock_show, power_ro_lock_store);
+
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int ret;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
- ret = snprintf(buf, PAGE_SIZE, "%d\n",
- get_disk_ro(dev_to_disk(dev)) ^
- md->read_only);
+ ret = sysfs_emit(buf, "%d\n",
+ get_disk_ro(dev_to_disk(dev)) ^
+ md->read_only);
mmc_blk_put(md);
return ret;
}
@@ -292,10 +316,10 @@ static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
- char *end;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
- unsigned long set = simple_strtoul(buf, &end, 0);
- if (end == buf) {
+ unsigned long set;
+
+ if (kstrtoul(buf, 0, &set)) {
ret = -EINVAL;
goto out;
}
@@ -307,15 +331,53 @@ out:
return ret;
}
-static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
+static DEVICE_ATTR(force_ro, 0644, force_ro_show, force_ro_store);
+
+static struct attribute *mmc_disk_attrs[] = {
+ &dev_attr_force_ro.attr,
+ &dev_attr_ro_lock_until_next_power_on.attr,
+ NULL,
+};
+
+static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj,
+ struct attribute *a, int n)
{
- struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
+ struct device *dev = kobj_to_dev(kobj);
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ umode_t mode = a->mode;
+
+ if (a == &dev_attr_ro_lock_until_next_power_on.attr &&
+ (md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+ md->queue.card->ext_csd.boot_ro_lockable) {
+ mode = 0444;
+ if (!(md->queue.card->ext_csd.boot_ro_lock &
+ EXT_CSD_BOOT_WP_B_PWR_WP_DIS))
+ mode |= 0200;
+ }
+
+ mmc_blk_put(md);
+ return mode;
+}
+
+static const struct attribute_group mmc_disk_attr_group = {
+ .is_visible = mmc_disk_attrs_is_visible,
+ .attrs = mmc_disk_attrs,
+};
+
+static const struct attribute_group *mmc_disk_attr_groups[] = {
+ &mmc_disk_attr_group,
+ NULL,
+};
+
+static int mmc_blk_open(struct gendisk *disk, blk_mode_t mode)
+{
+ struct mmc_blk_data *md = mmc_blk_get(disk);
int ret = -ENXIO;
mutex_lock(&block_mutex);
if (md) {
ret = 0;
- if ((mode & FMODE_WRITE) && md->read_only) {
+ if ((mode & BLK_OPEN_WRITE) && md->read_only) {
mmc_blk_put(md);
ret = -EROFS;
}
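The mmc_disk_attr_groups array defined earlier in this hunk is handed to the disk registration call later in the patch (via add_disk_fwnode()), so force_ro and ro_lock_until_next_power_on exist before the disk's uevent is sent; this avoids the races of calling device_create_file() after the disk is already visible, which is what the old mmc_add_disk() helper removed at the end of this patch used to do. A generic, self-contained sketch of the idiom with hypothetical names, shown here with plain device_add_disk():

#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(example);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};

static umode_t example_is_visible(struct kobject *kobj, struct attribute *a,
				  int n)
{
	return a->mode;		/* return 0 here to hide the attribute */
}

static const struct attribute_group example_group = {
	.is_visible = example_is_visible,
	.attrs	    = example_attrs,
};

static const struct attribute_group *example_groups[] = {
	&example_group,
	NULL,
};

static int example_add_disk(struct device *parent, struct gendisk *disk)
{
	/* Attributes appear atomically with the disk, before its uevent. */
	return device_add_disk(parent, disk, example_groups);
}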
@@ -325,7 +387,7 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
return ret;
}
-static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
+static void mmc_blk_release(struct gendisk *disk)
{
struct mmc_blk_data *md = disk->private_data;
@@ -335,9 +397,9 @@ static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
}
static int
-mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+mmc_blk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+ geo->cylinders = get_capacity(disk) / (4 * 16);
geo->heads = 4;
geo->sectors = 16;
return 0;
@@ -347,6 +409,10 @@ struct mmc_blk_ioc_data {
struct mmc_ioc_cmd ic;
unsigned char *buf;
u64 buf_bytes;
+ unsigned int flags;
+#define MMC_BLK_IOC_DROP BIT(0) /* drop this mrq */
+#define MMC_BLK_IOC_SBC BIT(1) /* use mrq.sbc */
+
struct mmc_rpmb_data *rpmb;
};
@@ -356,7 +422,7 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
struct mmc_blk_ioc_data *idata;
int err;
- idata = kmalloc(sizeof(*idata), GFP_KERNEL);
+ idata = kzalloc(sizeof(*idata), GFP_KERNEL);
if (!idata) {
err = -ENOMEM;
goto out;
@@ -411,55 +477,29 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
return 0;
}
-static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
- u32 *resp_errs)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
- int err = 0;
- u32 status;
-
- do {
- bool done = time_after(jiffies, timeout);
-
- err = __mmc_send_status(card, &status, 5);
- if (err) {
- dev_err(mmc_dev(card->host),
- "error %d requesting status\n", err);
- return err;
- }
-
- /* Accumulate any response error bits seen */
- if (resp_errs)
- *resp_errs |= status;
-
- /*
- * Timeout if the device never becomes ready for data and never
- * leaves the program state.
- */
- if (done) {
- dev_err(mmc_dev(card->host),
- "Card stuck in wrong state! %s status: %#x\n",
- __func__, status);
- return -ETIMEDOUT;
- }
- } while (!mmc_ready_for_data(status));
-
- return err;
-}
-
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
- struct mmc_blk_ioc_data *idata)
+ struct mmc_blk_ioc_data **idatas, int i)
{
struct mmc_command cmd = {}, sbc = {};
struct mmc_data data = {};
struct mmc_request mrq = {};
struct scatterlist sg;
+ bool r1b_resp;
+ unsigned int busy_timeout_ms;
int err;
unsigned int target_part;
+ struct mmc_blk_ioc_data *idata = idatas[i];
+ struct mmc_blk_ioc_data *prev_idata = NULL;
if (!card || !md || !idata)
return -EINVAL;
+ if (idata->flags & MMC_BLK_IOC_DROP)
+ return 0;
+
+ if (idata->flags & MMC_BLK_IOC_SBC && i > 0)
+ prev_idata = idatas[i - 1];
+
/*
* The RPMB accesses comes in from the character device, so we
* need to target these explicitly. Else we just target the
@@ -498,19 +538,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
if (idata->ic.data_timeout_ns)
data.timeout_ns = idata->ic.data_timeout_ns;
- if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
- /*
- * Pretend this is a data transfer and rely on the
- * host driver to compute timeout. When all host
- * drivers support cmd.cmd_timeout for R1B, this
- * can be changed to:
- *
- * mrq.data = NULL;
- * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
- */
- data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
- }
-
mrq.data = &data;
}
@@ -526,7 +553,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
return err;
}
- if (idata->rpmb) {
+ if (idata->rpmb || prev_idata) {
sbc.opcode = MMC_SET_BLOCK_COUNT;
/*
* We don't do any blockcount validation because the max size
@@ -534,6 +561,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
* 'Reliable Write' bit here.
*/
sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
+ if (prev_idata)
+ sbc.arg = prev_idata->ic.arg;
sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
mrq.sbc = &sbc;
}
@@ -542,7 +571,23 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
(cmd.opcode == MMC_SWITCH))
return mmc_sanitize(card, idata->ic.cmd_timeout_ms);
+ /* If it's an R1B response we need some more preparations. */
+ busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS;
+ r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B;
+ if (r1b_resp)
+ mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout_ms);
+
mmc_wait_for_req(card->host, &mrq);
+ memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
+
+ if (prev_idata) {
+ memcpy(&prev_idata->ic.response, sbc.resp, sizeof(sbc.resp));
+ if (sbc.error) {
+ dev_err(mmc_dev(card->host), "%s: sbc error %d\n",
+ __func__, sbc.error);
+ return sbc.error;
+ }
+ }
if (cmd.error) {
dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
@@ -592,14 +637,27 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
if (idata->ic.postsleep_min_us)
usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
- memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
+ if (mmc_host_is_spi(card->host)) {
+ if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY)
+ return mmc_spi_err_check(card);
+ return err;
+ }
- if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
- /*
- * Ensure RPMB/R1B command has completed by polling CMD13
- * "Send Status".
- */
- err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, NULL);
+ /*
+ * Ensure RPMB, writes and R1B responses are completed by polling with
+ * CMD13. Note that, usually we don't need to poll when using HW busy
+ * detection, but here it's needed since some commands may indicate the
+ * error through the R1 status bits.
+ */
+ if (idata->rpmb || idata->ic.write_flag || r1b_resp) {
+ struct mmc_blk_busy_data cb_data = {
+ .card = card,
+ };
+
+ err = __mmc_poll_for_busy(card->host, 0, busy_timeout_ms,
+ &mmc_blk_busy_cb, &cb_data);
+
+ idata->ic.response[0] = cb_data.status;
}
return err;
@@ -632,7 +690,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
* Dispatch the ioctl() into the block request queue.
*/
mq = &md->queue;
- req = blk_get_request(mq->queue,
+ req = blk_mq_alloc_request(mq->queue,
idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -641,12 +699,13 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
idatas[0] = idata;
req_to_mmc_queue_req(req)->drv_op =
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = idatas;
req_to_mmc_queue_req(req)->ioc_count = 1;
- blk_execute_rq(NULL, req, 0);
+ blk_execute_rq(req, false);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
- blk_put_request(req);
+ blk_mq_free_request(req);
cmd_done:
kfree(idata->buf);
@@ -662,8 +721,9 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
struct mmc_ioc_cmd __user *cmds = user->cmds;
struct mmc_card *card;
struct mmc_queue *mq;
- int i, err = 0, ioc_err = 0;
+ int err = 0, ioc_err = 0;
__u64 num_of_cmds;
+ unsigned int i, n;
struct request *req;
if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
@@ -676,15 +736,16 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
if (num_of_cmds > MMC_IOC_MAX_CMDS)
return -EINVAL;
- idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
+ n = num_of_cmds;
+ idata = kcalloc(n, sizeof(*idata), GFP_KERNEL);
if (!idata)
return -ENOMEM;
- for (i = 0; i < num_of_cmds; i++) {
+ for (i = 0; i < n; i++) {
idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
if (IS_ERR(idata[i])) {
err = PTR_ERR(idata[i]);
- num_of_cmds = i;
+ n = i;
goto cmd_err;
}
/* This will be NULL on non-RPMB ioctl():s */
@@ -702,7 +763,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
* Dispatch the ioctl()s into the block request queue.
*/
mq = &md->queue;
- req = blk_get_request(mq->queue,
+ req = blk_mq_alloc_request(mq->queue,
idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -710,19 +771,20 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
}
req_to_mmc_queue_req(req)->drv_op =
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = idata;
- req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
- blk_execute_rq(NULL, req, 0);
+ req_to_mmc_queue_req(req)->ioc_count = n;
+ blk_execute_rq(req, false);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
/* copy to user if data and response */
- for (i = 0; i < num_of_cmds && !err; i++)
+ for (i = 0; i < n && !err; i++)
err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
- blk_put_request(req);
+ blk_mq_free_request(req);
cmd_err:
- for (i = 0; i < num_of_cmds; i++) {
+ for (i = 0; i < n; i++) {
kfree(idata[i]->buf);
kfree(idata[i]);
}
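Both ioctl paths touched above now follow the same sequence: allocate a driver-private request, pre-set drv_op_result to -EIO so that a request completed without ever reaching mmc_blk_issue_drv_op() reports an error rather than a stale success value, execute it synchronously with blk_execute_rq(), and read the result back before freeing the request. A condensed sketch of that sequence as a hypothetical helper (not part of the patch), built only from calls already used in block.c:

static int mmc_blk_exec_drv_op_sketch(struct mmc_queue *mq, enum mmc_drv_op op,
				      void *op_data, unsigned int count,
				      bool write)
{
	struct request *req;
	int ret;

	req = blk_mq_alloc_request(mq->queue,
				   write ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req_to_mmc_queue_req(req)->drv_op = op;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;	/* assume failure */
	req_to_mmc_queue_req(req)->drv_op_data = op_data;
	req_to_mmc_queue_req(req)->ioc_count = count;

	blk_execute_rq(req, false);
	ret = req_to_mmc_queue_req(req)->drv_op_result;

	blk_mq_free_request(req);
	return ret;
}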
@@ -742,7 +804,7 @@ static int mmc_blk_check_blkdev(struct block_device *bdev)
return 0;
}
-static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
+static int mmc_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct mmc_blk_data *md;
@@ -779,13 +841,33 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
}
#ifdef CONFIG_COMPAT
-static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
+static int mmc_blk_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif
+static int mmc_blk_alternative_gpt_sector(struct gendisk *disk,
+ sector_t *sector)
+{
+ struct mmc_blk_data *md;
+ int ret;
+
+ md = mmc_blk_get(disk);
+ if (!md)
+ return -EINVAL;
+
+ if (md->queue.card)
+ ret = mmc_card_alternative_gpt_sector(md->queue.card, sector);
+ else
+ ret = -ENODEV;
+
+ mmc_blk_put(md);
+
+ return ret;
+}
+
static const struct block_device_operations mmc_bdops = {
.open = mmc_blk_open,
.release = mmc_blk_release,
@@ -795,14 +877,17 @@ static const struct block_device_operations mmc_bdops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = mmc_blk_compat_ioctl,
#endif
+ .alternative_gpt_sector = mmc_blk_alternative_gpt_sector,
};
static int mmc_blk_part_switch_pre(struct mmc_card *card,
unsigned int part_type)
{
+ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
+ const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
- if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
+ if ((part_type & mask) == rpmb) {
if (card->ext_csd.cmdq_en) {
ret = mmc_cmdq_disable(card);
if (ret)
@@ -817,9 +902,11 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card,
static int mmc_blk_part_switch_post(struct mmc_card *card,
unsigned int part_type)
{
+ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
+ const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
- if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
+ if ((part_type & mask) == rpmb) {
mmc_retune_unpause(card->host);
if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
ret = mmc_cmdq_enable(card);
@@ -869,30 +956,22 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
int err;
u32 result;
__be32 *blocks;
+ u8 resp_sz = mmc_card_ult_capacity(card) ? 8 : 4;
struct mmc_request mrq = {};
struct mmc_command cmd = {};
struct mmc_data data = {};
-
struct scatterlist sg;
- cmd.opcode = MMC_APP_CMD;
- cmd.arg = card->rca << 16;
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
-
- err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ err = mmc_app_cmd(card->host, card);
if (err)
return err;
- if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
- return -EIO;
-
- memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
- data.blksz = 4;
+ data.blksz = resp_sz;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;
@@ -902,15 +981,27 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
mrq.cmd = &cmd;
mrq.data = &data;
- blocks = kmalloc(4, GFP_KERNEL);
+ blocks = kmalloc(resp_sz, GFP_NOIO);
if (!blocks)
return -ENOMEM;
- sg_init_one(&sg, blocks, 4);
+ sg_init_one(&sg, blocks, resp_sz);
mmc_wait_for_req(card->host, &mrq);
- result = ntohl(*blocks);
+ if (mmc_card_ult_capacity(card)) {
+ /*
+ * Normally, ACMD22 returns the number of written sectors as
+ * u32. SDUC, however, returns it as u64. This is not a
+ * superfluous requirement, because SDUC writes may exceed 2TB.
+ * For Linux mmc however, the previously write operation could
+ * not be more than the block layer limits, thus just make room
+ * for a u64 and cast the response back to u32.
+ */
+ result = clamp_val(get_unaligned_be64(blocks), 0, UINT_MAX);
+ } else {
+ result = ntohl(*blocks);
+ }
kfree(blocks);
if (cmd.error || data.error)
@@ -949,33 +1040,39 @@ static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
return ms;
}
+/*
+ * Attempts to reset the card and get back to the requested partition.
+ * Therefore any error here must result in cancelling the block layer
+ * request, it must not be reattempted without going through the mmc_blk
+ * partition sanity checks.
+ */
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
int type)
{
int err;
+ struct mmc_blk_data *main_md = dev_get_drvdata(&host->card->dev);
if (md->reset_done & type)
return -EEXIST;
md->reset_done |= type;
- err = mmc_hw_reset(host);
+ err = mmc_hw_reset(host->card);
+ /*
+ * A successful reset will leave the card in the main partition, but
+ * upon failure it might not be, so set it to MMC_BLK_PART_INVALID
+ * in that case.
+ */
+ main_md->part_curr = err ? MMC_BLK_PART_INVALID : main_md->part_type;
+ if (err)
+ return err;
/* Ensure we switch back to the correct partition */
- if (err) {
- struct mmc_blk_data *main_md =
- dev_get_drvdata(&host->card->dev);
- int part_err;
-
- main_md->part_curr = main_md->part_type;
- part_err = mmc_blk_part_switch(host->card, md->part_type);
- if (part_err) {
- /*
- * We have failed to get back into the correct
- * partition, so we need to abort the whole request.
- */
- return -ENODEV;
- }
- }
- return err;
+ if (mmc_blk_part_switch(host->card, md->part_type))
+ /*
+ * We have failed to get back into the correct
+ * partition, so we need to abort the whole request.
+ */
+ return -ENODEV;
+ return 0;
}
static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
@@ -983,6 +1080,20 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
md->reset_done &= ~type;
}
+static void mmc_blk_check_sbc(struct mmc_queue_req *mq_rq)
+{
+ struct mmc_blk_ioc_data **idata = mq_rq->drv_op_data;
+ int i;
+
+ for (i = 1; i < mq_rq->ioc_count; i++) {
+ if (idata[i - 1]->ic.opcode == MMC_SET_BLOCK_COUNT &&
+ mmc_op_multi(idata[i]->ic.opcode)) {
+ idata[i - 1]->flags |= MMC_BLK_IOC_DROP;
+ idata[i]->flags |= MMC_BLK_IOC_SBC;
+ }
+ }
+}
+
/*
* The non-block commands come back from the block layer after it queued it and
* processed it with all other requests and then they get issued in this
@@ -1010,11 +1121,14 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
if (ret)
break;
}
+
+ mmc_blk_check_sbc(mq_rq);
+
fallthrough;
case MMC_DRV_OP_IOCTL_RPMB:
idata = mq_rq->drv_op_data;
for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
- ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
+ ret = __mmc_blk_ioctl_cmd(card, md, idata, i);
if (ret)
break;
}
@@ -1055,15 +1169,17 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
-static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req,
+ int type, unsigned int erase_arg)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
- unsigned int from, nr;
- int err = 0, type = MMC_BLK_DISCARD;
+ unsigned int nr;
+ sector_t from;
+ int err = 0;
blk_status_t status = BLK_STS_OK;
- if (!mmc_can_erase(card)) {
+ if (!mmc_card_can_erase(card)) {
status = BLK_STS_NOTSUPP;
goto fail;
}
@@ -1076,13 +1192,13 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
- card->erase_arg == MMC_TRIM_ARG ?
+ erase_arg == MMC_TRIM_ARG ?
INAND_CMD38_ARG_TRIM :
INAND_CMD38_ARG_ERASE,
card->ext_csd.generic_cmd6_time);
}
if (!err)
- err = mmc_erase(card, from, nr, card->erase_arg);
+ err = mmc_erase(card, from, nr, erase_arg);
} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
if (err)
status = BLK_STS_IOERR;
@@ -1092,16 +1208,34 @@ fail:
blk_mq_end_request(req, status);
}
+static void mmc_blk_issue_trim_rq(struct mmc_queue *mq, struct request *req)
+{
+ mmc_blk_issue_erase_rq(mq, req, MMC_BLK_TRIM, MMC_TRIM_ARG);
+}
+
+static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_card *card = md->queue.card;
+ unsigned int arg = card->erase_arg;
+
+ if (mmc_card_broken_sd_discard(card))
+ arg = SD_ERASE_ARG;
+
+ mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, arg);
+}
+
static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
struct request *req)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
- unsigned int from, nr, arg;
+ unsigned int nr, arg;
+ sector_t from;
int err = 0, type = MMC_BLK_SECDISCARD;
blk_status_t status = BLK_STS_OK;
- if (!(mmc_can_secure_erase_trim(card))) {
+ if (!(mmc_card_can_secure_erase_trim(card))) {
status = BLK_STS_NOTSUPP;
goto out;
}
@@ -1109,7 +1243,7 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
- if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+ if (mmc_card_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
arg = MMC_SECURE_TRIM1_ARG;
else
arg = MMC_SECURE_ERASE_ARG;
@@ -1250,7 +1384,7 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
}
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
- int disable_multi, bool *do_rel_wr_p,
+ int recovery_mode, bool *do_rel_wr_p,
bool *do_data_tag_p)
{
struct mmc_blk_data *md = mq->blkdata;
@@ -1316,12 +1450,12 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blocks--;
/*
- * After a read error, we redo the request one sector
+ * After a read error, we redo the request one (native) sector
* at a time in order to accurately determine which
* sectors can be read successfully.
*/
- if (disable_multi)
- brq->data.blocks = 1;
+ if (recovery_mode)
+ brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;
/*
* Some controllers have HW issues while operating
@@ -1415,6 +1549,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
blk_mq_requeue_request(req, true);
else
__blk_mq_end_request(req, BLK_STS_OK);
+ } else if (mq->in_recovery) {
+ blk_mq_requeue_request(req, true);
} else {
blk_mq_end_request(req, BLK_STS_OK);
}
@@ -1447,8 +1583,7 @@ void mmc_blk_cqe_recovery(struct mmc_queue *mq)
err = mmc_cqe_recovery(host);
if (err)
mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
- else
- mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+ mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
}
@@ -1539,7 +1674,7 @@ static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
- int disable_multi,
+ int recovery_mode,
struct mmc_queue *mq)
{
u32 readcmd, writecmd;
@@ -1548,7 +1683,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_blk_data *md = mq->blkdata;
bool do_rel_wr, do_data_tag;
- mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
+ mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);
brq->mrq.cmd = &brq->cmd;
@@ -1592,8 +1727,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
* these, while retaining features like reliable writes.
*/
if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
- (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
- do_data_tag)) {
+ (do_rel_wr || !mmc_card_blk_no_cmd23(card) || do_data_tag)) {
brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
brq->sbc.arg = brq->data.blocks |
(do_rel_wr ? (1 << 31) : 0) |
@@ -1601,6 +1735,11 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
brq->mrq.sbc = &brq->sbc;
}
+
+ if (mmc_card_ult_capacity(card)) {
+ brq->cmd.ext_addr = blk_rq_pos(req) >> 32;
+ brq->cmd.has_ext_addr = true;
+ }
}
#define MMC_MAX_RETRIES 5
@@ -1630,7 +1769,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
mmc_blk_send_stop(card, timeout);
- err = card_busy_detect(card, timeout, NULL);
+ err = mmc_poll_for_busy(card, timeout, false, MMC_BUSY_IO);
mmc_retune_release(card->host);
@@ -1639,7 +1778,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
#define MMC_READ_SINGLE_RETRIES 2
-/* Single sector read during recovery */
+/* Single (native) sector read during recovery */
static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
@@ -1647,31 +1786,32 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
blk_status_t error = BLK_STS_OK;
- int retries = 0;
+ size_t bytes_per_read = queue_physical_block_size(mq->queue);
do {
u32 status;
int err;
+ int retries = 0;
- mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
+ while (retries++ <= MMC_READ_SINGLE_RETRIES) {
+ mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
- mmc_wait_for_req(host, mrq);
+ mmc_wait_for_req(host, mrq);
- err = mmc_send_status(card, &status);
- if (err)
- goto error_exit;
-
- if (!mmc_host_is_spi(host) &&
- !mmc_ready_for_data(status)) {
- err = mmc_blk_fix_state(card, req);
+ err = mmc_send_status(card, &status);
if (err)
goto error_exit;
- }
- if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
- continue;
+ if (!mmc_host_is_spi(host) &&
+ !mmc_ready_for_data(status)) {
+ err = mmc_blk_fix_state(card, req);
+ if (err)
+ goto error_exit;
+ }
- retries = 0;
+ if (!mrq->cmd->error)
+ break;
+ }
if (mrq->cmd->error ||
mrq->data->error ||
@@ -1681,13 +1821,13 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
else
error = BLK_STS_OK;
- } while (blk_update_request(req, error, 512));
+ } while (blk_update_request(req, error, bytes_per_read));
return;
error_exit:
mrq->data->bytes_xfered = 0;
- blk_update_request(req, BLK_STS_IOERR, 512);
+ blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
/* Let it try the remaining request again */
if (mqrq->retries > MMC_MAX_RETRIES - 1)
mqrq->retries = MMC_MAX_RETRIES - 1;
@@ -1802,7 +1942,7 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
/* Reset if the card is in a bad state */
if (!mmc_host_is_spi(mq->card->host) &&
err && mmc_blk_reset(md, card->host, type)) {
- pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
+ pr_err("%s: recovery failed!\n", req->q->disk->disk_name);
mqrq->retries = MMC_NO_RETRIES;
return;
}
@@ -1815,8 +1955,9 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
return;
/* Reset before last retry */
- if (mqrq->retries + 1 == MMC_MAX_RETRIES)
- mmc_blk_reset(md, card->host, type);
+ if (mqrq->retries + 1 == MMC_MAX_RETRIES &&
+ mmc_blk_reset(md, card->host, type))
+ return;
/* Command errors fail fast, so use all MMC_MAX_RETRIES */
if (brq->sbc.error || brq->cmd.error)
@@ -1828,10 +1969,9 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
return;
}
- /* FIXME: Missing single sector read for large sector size */
- if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
- brq->data.blocks > 1) {
- /* Read one sector at a time */
+ if (rq_data_dir(req) == READ && brq->data.blocks >
+ queue_physical_block_size(mq->queue) >> 9) {
+ /* Read one (native) sector at a time */
mmc_blk_read_single(mq, req);
return;
}
@@ -1845,28 +1985,80 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
}
+static int mmc_spi_err_check(struct mmc_card *card)
+{
+ u32 status = 0;
+ int err;
+
+ /*
+ * SPI does not have a TRAN state we have to wait on, instead the
+ * card is ready again when it no longer holds the line LOW.
+ * We still have to ensure two things here before we know the write
+ * was successful:
+ * 1. The card has not disconnected during busy and we actually read our
+ * own pull-up, thinking it was still connected, so ensure it
+ * still responds.
+ * 2. Check for any error bits, in particular R1_SPI_IDLE to catch a
+ * just reconnected card after being disconnected during busy.
+ */
+ err = __mmc_send_status(card, &status, 0);
+ if (err)
+ return err;
+ /* All R1 and R2 bits of SPI are errors in our case */
+ if (status)
+ return -EIO;
+ return 0;
+}
+
+static int mmc_blk_busy_cb(void *cb_data, bool *busy)
+{
+ struct mmc_blk_busy_data *data = cb_data;
+ u32 status = 0;
+ int err;
+
+ err = mmc_send_status(data->card, &status);
+ if (err)
+ return err;
+
+ /* Accumulate response error bits. */
+ data->status |= status;
+
+ *busy = !mmc_ready_for_data(status);
+ return 0;
+}
+
static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
- u32 status = 0;
+ struct mmc_blk_busy_data cb_data;
int err;
- if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
+ if (rq_data_dir(req) == READ)
return 0;
- err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status);
+ if (mmc_host_is_spi(card->host)) {
+ err = mmc_spi_err_check(card);
+ if (err)
+ mqrq->brq.data.bytes_xfered = 0;
+ return err;
+ }
+
+ cb_data.card = card;
+ cb_data.status = 0;
+ err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
+ &mmc_blk_busy_cb, &cb_data);
/*
* Do not assume data transferred correctly if there are any error bits
* set.
*/
- if (status & mmc_blk_stop_err_bits(&mqrq->brq)) {
+ if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) {
mqrq->brq.data.bytes_xfered = 0;
err = err ? err : -EIO;
}
/* Copy the exception bit so it will be seen later on */
- if (mmc_card_mmc(card) && status & R1_EXCEPTION_EVENT)
+ if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT)
mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;
return err;
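mmc_blk_busy_cb() above is written against the callback contract of __mmc_poll_for_busy() in core/mmc_ops.c: the core repeatedly invokes the callback until it reports not-busy, returns an error, or the timeout expires, while the callback accumulates R1 status bits into the caller's mmc_blk_busy_data. A simplified sketch of such a poll loop, only to make the contract explicit (not the actual core implementation):

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int poll_for_busy_sketch(int (*busy_cb)(void *cb_data, bool *busy),
				void *cb_data, unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	bool busy;
	int err;

	do {
		/* Sample the clock before polling, so a slow callback
		 * cannot turn one last successful check into a timeout. */
		bool expired = time_after(jiffies, timeout);

		err = busy_cb(cb_data, &busy);
		if (err)
			return err;
		if (!busy)
			return 0;
		if (expired)
			return -ETIMEDOUT;

		usleep_range(100, 200);
	} while (1);
}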
@@ -1979,14 +2171,14 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
mmc_blk_urgent_bkops(mq, mqrq);
}
-static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
{
unsigned long flags;
bool put_card;
spin_lock_irqsave(&mq->lock, flags);
- mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+ mq->in_flight[issue_type] -= 1;
put_card = (mmc_tot_in_flight(mq) == 0);
@@ -1996,8 +2188,10 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
mmc_put_card(mq->card, &mq->ctx);
}
-static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
+ bool can_sleep)
{
+ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_request *mrq = &mqrq->brq.mrq;
struct mmc_host *host = mq->card->host;
@@ -2008,12 +2202,16 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
* Block layer timeouts race with completions which means the normal
* completion path cannot be used during recovery.
*/
- if (mq->in_recovery)
+ if (mq->in_recovery) {
mmc_blk_mq_complete_rq(mq, req);
- else if (likely(!blk_should_fake_timeout(req->q)))
- blk_mq_complete_request(req);
+ } else if (likely(!blk_should_fake_timeout(req->q))) {
+ if (can_sleep)
+ blk_mq_complete_request_direct(req, mmc_blk_mq_complete);
+ else
+ blk_mq_complete_request(req);
+ }
- mmc_blk_mq_dec_in_flight(mq, req);
+ mmc_blk_mq_dec_in_flight(mq, issue_type);
}
void mmc_blk_mq_recovery(struct mmc_queue *mq)
@@ -2032,13 +2230,13 @@ void mmc_blk_mq_recovery(struct mmc_queue *mq)
mmc_blk_urgent_bkops(mq, mqrq);
- mmc_blk_mq_post_req(mq, req);
+ mmc_blk_mq_post_req(mq, req, true);
}
static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
struct request **prev_req)
{
- if (mmc_host_done_complete(mq->card->host))
+ if (mmc_host_can_done_complete(mq->card->host))
return;
mutex_lock(&mq->complete_lock);
@@ -2051,7 +2249,7 @@ static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
if (prev_req)
*prev_req = mq->complete_req;
else
- mmc_blk_mq_post_req(mq, mq->complete_req);
+ mmc_blk_mq_post_req(mq, mq->complete_req, true);
mq->complete_req = NULL;
@@ -2077,7 +2275,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
struct mmc_host *host = mq->card->host;
unsigned long flags;
- if (!mmc_host_done_complete(host)) {
+ if (!mmc_host_can_done_complete(host)) {
bool waiting;
/*
@@ -2123,7 +2321,8 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
mq->rw_wait = false;
wake_up(&mq->wait);
- mmc_blk_mq_post_req(mq, req);
+ /* context unknown */
+ mmc_blk_mq_post_req(mq, req, false);
}
static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
@@ -2183,13 +2382,13 @@ static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
err = mmc_start_request(host, &mqrq->brq.mrq);
if (prev_req)
- mmc_blk_mq_post_req(mq, prev_req);
+ mmc_blk_mq_post_req(mq, prev_req, true);
if (err)
mq->rw_wait = false;
/* Release re-tuning here where there is no synchronization required */
- if (err || mmc_host_done_complete(host))
+ if (err || mmc_host_can_done_complete(host))
mmc_retune_release(host);
out_post_req:
@@ -2234,6 +2433,9 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
case REQ_OP_SECURE_ERASE:
mmc_blk_issue_secdiscard_rq(mq, req);
break;
+ case REQ_OP_WRITE_ZEROES:
+ mmc_blk_issue_trim_rq(mq, req);
+ break;
case REQ_OP_FLUSH:
mmc_blk_issue_flush(mq, req);
break;
@@ -2252,8 +2454,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
}
ret = mmc_blk_cqe_issue_flush(mq, req);
break;
- case REQ_OP_READ:
case REQ_OP_WRITE:
+ card->written_flag = true;
+ fallthrough;
+ case REQ_OP_READ:
if (host->cqe_enabled)
ret = mmc_blk_cqe_issue_rw_rq(mq, req);
else
@@ -2278,18 +2482,71 @@ static inline int mmc_blk_readonly(struct mmc_card *card)
!(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
+/*
+ * Search for a declared partitions node for the disk in the mmc-card related
+ * node.
+ *
+ * This is to permit support for a partition table defined in DT, for the
+ * special case where a partition table is not written on the disk and is
+ * expected to be passed in from the running system.
+ *
+ * For the user disk, the "partitions" node is searched for.
+ * For the special HW disks, a "partitions-" node with the appended name is
+ * used, following this conversion table (to adhere to JEDEC naming):
+ * - boot0 -> partitions-boot1
+ * - boot1 -> partitions-boot2
+ * - gp0 -> partitions-gp1
+ * - gp1 -> partitions-gp2
+ * - gp2 -> partitions-gp3
+ * - gp3 -> partitions-gp4
+ */
+static struct fwnode_handle *mmc_blk_get_partitions_node(struct device *mmc_dev,
+ const char *subname)
+{
+ const char *node_name = "partitions";
+
+ if (subname) {
+ mmc_dev = mmc_dev->parent;
+
+ /*
+ * Check if we are allocating a BOOT disk boot0/1 disk.
+ * In DT we use the JEDEC naming boot1/2.
+ */
+ if (!strcmp(subname, "boot0"))
+ node_name = "partitions-boot1";
+ if (!strcmp(subname, "boot1"))
+ node_name = "partitions-boot2";
+ /*
+ * Check if we are allocating a GP disk gp0/1/2/3 disk.
+ * In DT we use the JEDEC naming gp1/2/3/4.
+ */
+ if (!strcmp(subname, "gp0"))
+ node_name = "partitions-gp1";
+ if (!strcmp(subname, "gp1"))
+ node_name = "partitions-gp2";
+ if (!strcmp(subname, "gp2"))
+ node_name = "partitions-gp3";
+ if (!strcmp(subname, "gp3"))
+ node_name = "partitions-gp4";
+ }
+
+ return device_get_named_child_node(mmc_dev, node_name);
+}
+
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct device *parent,
sector_t size,
bool default_ro,
const char *subname,
- int area_type)
+ int area_type,
+ unsigned int part_type)
{
+ struct fwnode_handle *disk_fwnode;
struct mmc_blk_data *md;
int devidx, ret;
char cap_str[10];
+ unsigned int features = 0;
- devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
+ devidx = ida_alloc_max(&mmc_blk_ida, max_devices - 1, GFP_KERNEL);
if (devidx < 0) {
/*
* We get -ENOSPC because there are no more any available
@@ -2305,7 +2562,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
return ERR_PTR(devidx);
}
- md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
+ md = kzalloc(sizeof(*md), GFP_KERNEL);
if (!md) {
ret = -ENOMEM;
goto out;
@@ -2319,7 +2576,19 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
*/
md->read_only = mmc_blk_readonly(card);
- md->disk = mmc_init_queue(&md->queue, card);
+ if (mmc_host_can_cmd23(card->host) && mmc_card_can_cmd23(card))
+ md->flags |= MMC_BLK_CMD23;
+
+ if (md->flags & MMC_BLK_CMD23 &&
+ ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
+ card->ext_csd.rel_sectors)) {
+ md->flags |= MMC_BLK_REL_WR;
+ features |= (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
+ } else if (mmc_cache_enabled(card->host)) {
+ features |= BLK_FEAT_WRITE_CACHE;
+ }
+
+ md->disk = mmc_init_queue(&md->queue, card, features);
if (IS_ERR(md->disk)) {
ret = PTR_ERR(md->disk);
goto err_kfree;
@@ -2327,8 +2596,10 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
INIT_LIST_HEAD(&md->part);
INIT_LIST_HEAD(&md->rpmbs);
- md->usage = 1;
+ kref_init(&md->kref);
+
md->queue.blkdata = md;
+ md->part_type = part_type;
md->disk->major = MMC_BLOCK_MAJOR;
md->disk->minors = perdev_minors;
@@ -2337,10 +2608,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->disk->private_data = md;
md->parent = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
- md->disk->flags = GENHD_FL_EXT_DEVT;
- if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
- md->disk->flags |= GENHD_FL_NO_PART_SCAN
- | GENHD_FL_SUPPRESS_PARTITION_INFO;
+ if (area_type & MMC_BLK_DATA_AREA_RPMB)
+ md->disk->flags |= GENHD_FL_NO_PART;
/*
* As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -2359,34 +2628,29 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
set_capacity(md->disk, size);
- if (mmc_host_cmd23(card->host)) {
- if ((mmc_card_mmc(card) &&
- card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
- (mmc_card_sd(card) &&
- card->scr.cmds & SD_SCR_CMD23_SUPPORT))
- md->flags |= MMC_BLK_CMD23;
- }
-
- if (mmc_card_mmc(card) &&
- md->flags & MMC_BLK_CMD23 &&
- ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
- card->ext_csd.rel_sectors)) {
- md->flags |= MMC_BLK_REL_WR;
- blk_queue_write_cache(md->queue.queue, true, true);
- }
-
string_get_size((u64)size, 512, STRING_UNITS_2,
cap_str, sizeof(cap_str));
- pr_info("%s: %s %s %s %s\n",
+ pr_info("%s: %s %s %s%s\n",
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
- cap_str, md->read_only ? "(ro)" : "");
-
+ cap_str, md->read_only ? " (ro)" : "");
+
+ /* used in ->open, must be set before add_disk: */
+ if (area_type == MMC_BLK_DATA_AREA_MAIN)
+ dev_set_drvdata(&card->dev, md);
+ disk_fwnode = mmc_blk_get_partitions_node(parent, subname);
+ ret = add_disk_fwnode(md->parent, md->disk, mmc_disk_attr_groups,
+ disk_fwnode);
+ if (ret)
+ goto err_put_disk;
return md;
+ err_put_disk:
+ put_disk(md->disk);
+ blk_mq_free_tag_set(&md->queue.tag_set);
err_kfree:
kfree(md);
out:
- ida_simple_remove(&mmc_blk_ida, devidx);
+ ida_free(&mmc_blk_ida, devidx);
return ERR_PTR(ret);
}
@@ -2410,7 +2674,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
}
return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
- MMC_BLK_DATA_AREA_MAIN);
+ MMC_BLK_DATA_AREA_MAIN, 0);
}
static int mmc_blk_alloc_part(struct mmc_card *card,
@@ -2424,10 +2688,9 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
struct mmc_blk_data *part_md;
part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
- subname, area_type);
+ subname, area_type, part_type);
if (IS_ERR(part_md))
return PTR_ERR(part_md);
- part_md->part_type = part_type;
list_add(&part_md->part, &md->part);
return 0;
@@ -2482,7 +2745,6 @@ static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
get_device(&rpmb->dev);
filp->private_data = rpmb;
- mmc_blk_get(rpmb->md->disk);
return nonseekable_open(inode, filp);
}
@@ -2492,7 +2754,6 @@ static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
struct mmc_rpmb_data, chrdev);
- mmc_blk_put(rpmb->md);
put_device(&rpmb->dev);
return 0;
@@ -2502,7 +2763,6 @@ static const struct file_operations mmc_rpmb_fileops = {
.release = mmc_rpmb_chrdev_release,
.open = mmc_rpmb_chrdev_open,
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = mmc_rpmb_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mmc_rpmb_ioctl_compat,
@@ -2513,10 +2773,159 @@ static void mmc_blk_rpmb_device_release(struct device *dev)
{
struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
- ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
+ rpmb_dev_unregister(rpmb->rdev);
+ mmc_blk_put(rpmb->md);
+ ida_free(&mmc_rpmb_ida, rpmb->id);
kfree(rpmb);
}
+static void free_idata(struct mmc_blk_ioc_data **idata, unsigned int cmd_count)
+{
+ unsigned int n;
+
+ for (n = 0; n < cmd_count; n++)
+ kfree(idata[n]);
+ kfree(idata);
+}
+
+static struct mmc_blk_ioc_data **alloc_idata(struct mmc_rpmb_data *rpmb,
+ unsigned int cmd_count)
+{
+ struct mmc_blk_ioc_data **idata;
+ unsigned int n;
+
+ idata = kcalloc(cmd_count, sizeof(*idata), GFP_KERNEL);
+ if (!idata)
+ return NULL;
+
+ for (n = 0; n < cmd_count; n++) {
+ idata[n] = kcalloc(1, sizeof(**idata), GFP_KERNEL);
+ if (!idata[n]) {
+ free_idata(idata, n);
+ return NULL;
+ }
+ idata[n]->rpmb = rpmb;
+ }
+
+ return idata;
+}
+
+static void set_idata(struct mmc_blk_ioc_data *idata, u32 opcode,
+ int write_flag, u8 *buf, unsigned int buf_bytes)
+{
+ /*
+ * The size of an RPMB frame must match what's expected by the
+ * hardware.
+ */
+ static_assert(!CHECK_SIZE_NEQ(512), "RPMB frame size must be 512 bytes");
+
+ idata->ic.opcode = opcode;
+ idata->ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+ idata->ic.write_flag = write_flag;
+ idata->ic.blksz = RPMB_FRAME_SIZE;
+ idata->ic.blocks = buf_bytes / idata->ic.blksz;
+ idata->buf = buf;
+ idata->buf_bytes = buf_bytes;
+}
+
+static int mmc_route_rpmb_frames(struct device *dev, u8 *req,
+ unsigned int req_len, u8 *resp,
+ unsigned int resp_len)
+{
+ struct rpmb_frame *frm = (struct rpmb_frame *)req;
+ struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
+ struct mmc_blk_data *md = rpmb->md;
+ struct mmc_blk_ioc_data **idata;
+ struct mmc_queue_req *mq_rq;
+ unsigned int cmd_count;
+ struct request *rq;
+ u16 req_type;
+ bool write;
+ int ret;
+
+ if (IS_ERR(md->queue.card))
+ return PTR_ERR(md->queue.card);
+
+ if (req_len < RPMB_FRAME_SIZE)
+ return -EINVAL;
+
+ req_type = be16_to_cpu(frm->req_resp);
+ switch (req_type) {
+ case RPMB_PROGRAM_KEY:
+ if (CHECK_SIZE_NEQ(req_len) || CHECK_SIZE_NEQ(resp_len))
+ return -EINVAL;
+ write = true;
+ break;
+ case RPMB_GET_WRITE_COUNTER:
+ if (CHECK_SIZE_NEQ(req_len) || CHECK_SIZE_NEQ(resp_len))
+ return -EINVAL;
+ write = false;
+ break;
+ case RPMB_WRITE_DATA:
+ if (!CHECK_SIZE_ALIGNED(req_len) || CHECK_SIZE_NEQ(resp_len))
+ return -EINVAL;
+ write = true;
+ break;
+ case RPMB_READ_DATA:
+ if (CHECK_SIZE_NEQ(req_len) || !CHECK_SIZE_ALIGNED(resp_len))
+ return -EINVAL;
+ write = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Write operations require 3 commands, read operations require 2 */
+ cmd_count = write ? 3 : 2;
+
+ idata = alloc_idata(rpmb, cmd_count);
+ if (!idata)
+ return -ENOMEM;
+
+ if (write) {
+ struct rpmb_frame *resp_frm = (struct rpmb_frame *)resp;
+
+ /* Send write request frame(s) */
+ set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK,
+ 1 | MMC_CMD23_ARG_REL_WR, req, req_len);
+
+ /* Send result request frame */
+ memset(resp_frm, 0, RPMB_FRAME_SIZE);
+ resp_frm->req_resp = cpu_to_be16(RPMB_RESULT_READ);
+ set_idata(idata[1], MMC_WRITE_MULTIPLE_BLOCK, 1, resp,
+ resp_len);
+
+ /* Read response frame */
+ set_idata(idata[2], MMC_READ_MULTIPLE_BLOCK, 0, resp, resp_len);
+ } else {
+ /* Send write request frame(s) */
+ set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK, 1, req, req_len);
+
+ /* Read response frame */
+ set_idata(idata[1], MMC_READ_MULTIPLE_BLOCK, 0, resp, resp_len);
+ }
+
+ rq = blk_mq_alloc_request(md->queue.queue, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto out;
+ }
+
+ mq_rq = req_to_mmc_queue_req(rq);
+ mq_rq->drv_op = MMC_DRV_OP_IOCTL_RPMB;
+ mq_rq->drv_op_result = -EIO;
+ mq_rq->drv_op_data = idata;
+ mq_rq->ioc_count = cmd_count;
+ blk_execute_rq(rq, false);
+ ret = req_to_mmc_queue_req(rq)->drv_op_result;
+
+ blk_mq_free_request(rq);
+
+out:
+ free_idata(idata, cmd_count);
+ return ret;
+}
+
static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
struct mmc_blk_data *md,
unsigned int part_index,
@@ -2529,13 +2938,13 @@ static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
struct mmc_rpmb_data *rpmb;
/* This creates the minor number for the RPMB char device */
- devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
+ devidx = ida_alloc_max(&mmc_rpmb_ida, max_devices - 1, GFP_KERNEL);
if (devidx < 0)
return devidx;
rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
if (!rpmb) {
- ida_simple_remove(&mmc_rpmb_ida, devidx);
+ ida_free(&mmc_rpmb_ida, devidx);
return -ENOMEM;
}
@@ -2551,6 +2960,7 @@ static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
rpmb->dev.release = mmc_blk_rpmb_device_release;
device_initialize(&rpmb->dev);
dev_set_drvdata(&rpmb->dev, rpmb);
+ mmc_blk_get(md->disk);
rpmb->md = md;
cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
@@ -2628,27 +3038,13 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
- struct mmc_card *card;
-
- if (md) {
- /*
- * Flush remaining requests and free queues. It
- * is freeing the queue that stops new requests
- * from being accepted.
- */
- card = md->queue.card;
- if (md->disk->flags & GENHD_FL_UP) {
- device_remove_file(disk_to_dev(md->disk), &md->force_ro);
- if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
- card->ext_csd.boot_ro_lockable)
- device_remove_file(disk_to_dev(md->disk),
- &md->power_ro_lock);
-
- del_gendisk(md->disk);
- }
- mmc_cleanup_queue(&md->queue);
- mmc_blk_put(md);
- }
+ /*
+ * Flush remaining requests and free queues. It is freeing the queue
+ * that stops new requests from being accepted.
+ */
+ del_gendisk(md->disk);
+ mmc_cleanup_queue(&md->queue);
+ mmc_blk_put(md);
}
static void mmc_blk_remove_parts(struct mmc_card *card,
@@ -2672,51 +3068,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
}
}
-static int mmc_add_disk(struct mmc_blk_data *md)
-{
- int ret;
- struct mmc_card *card = md->queue.card;
-
- device_add_disk(md->parent, md->disk, NULL);
- md->force_ro.show = force_ro_show;
- md->force_ro.store = force_ro_store;
- sysfs_attr_init(&md->force_ro.attr);
- md->force_ro.attr.name = "force_ro";
- md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
- ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
- if (ret)
- goto force_ro_fail;
-
- if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
- card->ext_csd.boot_ro_lockable) {
- umode_t mode;
-
- if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
- mode = S_IRUGO;
- else
- mode = S_IRUGO | S_IWUSR;
-
- md->power_ro_lock.show = power_ro_lock_show;
- md->power_ro_lock.store = power_ro_lock_store;
- sysfs_attr_init(&md->power_ro_lock.attr);
- md->power_ro_lock.attr.mode = mode;
- md->power_ro_lock.attr.name =
- "ro_lock_until_next_power_on";
- ret = device_create_file(disk_to_dev(md->disk),
- &md->power_ro_lock);
- if (ret)
- goto power_ro_lock_fail;
- }
- return ret;
-
-power_ro_lock_fail:
- device_remove_file(disk_to_dev(md->disk), &md->force_ro);
-force_ro_fail:
- del_gendisk(md->disk);
-
- return ret;
-}
-
#ifdef CONFIG_DEBUG_FS
static int mmc_dbg_card_status_get(void *data, u64 *val)
@@ -2728,17 +3079,18 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
int ret;
/* Ask the block layer about the card status */
- req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
+ req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return PTR_ERR(req);
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
- blk_execute_rq(NULL, req, 0);
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
+ blk_execute_rq(req, false);
ret = req_to_mmc_queue_req(req)->drv_op_result;
if (ret >= 0) {
*val = ret;
ret = 0;
}
- blk_put_request(req);
+ blk_mq_free_request(req);
return ret;
}
@@ -2764,16 +3116,17 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
return -ENOMEM;
/* Ask the block layer for the EXT CSD */
- req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
+ req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out_free;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
- blk_execute_rq(NULL, req, 0);
+ blk_execute_rq(req, false);
err = req_to_mmc_queue_req(req)->drv_op_result;
- blk_put_request(req);
+ blk_mq_free_request(req);
if (err) {
pr_err("FAILED %d\n", err);
goto out_free;
@@ -2820,12 +3173,12 @@ static const struct file_operations mmc_dbg_ext_csd_fops = {
.llseek = default_llseek,
};
-static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
+static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
{
struct dentry *root;
if (!card->debugfs_root)
- return 0;
+ return;
root = card->debugfs_root;
@@ -2834,19 +3187,13 @@ static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
debugfs_create_file_unsafe("status", 0400, root,
card,
&mmc_dbg_card_status_fops);
- if (!md->status_dentry)
- return -EIO;
}
if (mmc_card_mmc(card)) {
md->ext_csd_dentry =
- debugfs_create_file("ext_csd", S_IRUSR, root, card,
+ debugfs_create_file("ext_csd", 0400, root, card,
&mmc_dbg_ext_csd_fops);
- if (!md->ext_csd_dentry)
- return -EIO;
}
-
- return 0;
}
static void mmc_blk_remove_debugfs(struct mmc_card *card,
@@ -2855,22 +3202,17 @@ static void mmc_blk_remove_debugfs(struct mmc_card *card,
if (!card->debugfs_root)
return;
- if (!IS_ERR_OR_NULL(md->status_dentry)) {
- debugfs_remove(md->status_dentry);
- md->status_dentry = NULL;
- }
+ debugfs_remove(md->status_dentry);
+ md->status_dentry = NULL;
- if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) {
- debugfs_remove(md->ext_csd_dentry);
- md->ext_csd_dentry = NULL;
- }
+ debugfs_remove(md->ext_csd_dentry);
+ md->ext_csd_dentry = NULL;
}
#else
-static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
+static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
{
- return 0;
}
static void mmc_blk_remove_debugfs(struct mmc_card *card,
@@ -2880,9 +3222,45 @@ static void mmc_blk_remove_debugfs(struct mmc_card *card,
#endif /* CONFIG_DEBUG_FS */
+static void mmc_blk_rpmb_add(struct mmc_card *card)
+{
+ struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+ struct mmc_rpmb_data *rpmb;
+ struct rpmb_dev *rdev;
+ unsigned int n;
+ u32 cid[4];
+ struct rpmb_descr descr = {
+ .type = RPMB_TYPE_EMMC,
+ .route_frames = mmc_route_rpmb_frames,
+ .reliable_wr_count = card->ext_csd.enhanced_rpmb_supported ?
+ 2 : 32,
+ .capacity = card->ext_csd.raw_rpmb_size_mult,
+ .dev_id = (void *)cid,
+ .dev_id_len = sizeof(cid),
+ };
+
+ /*
+ * Provide CID as an octet array. The CID needs to be interpreted
+ * when used as input to derive the RPMB key since some fields
+ * will change due to firmware updates.
+ */
+ for (n = 0; n < 4; n++)
+ cid[n] = be32_to_cpu((__force __be32)card->raw_cid[n]);
+
+ list_for_each_entry(rpmb, &md->rpmbs, node) {
+ rdev = rpmb_dev_register(&rpmb->dev, &descr);
+ if (IS_ERR(rdev)) {
+ pr_warn("%s: could not register RPMB device\n",
+ dev_name(&rpmb->dev));
+ continue;
+ }
+ rpmb->rdev = rdev;
+ }
+}
+
static int mmc_blk_probe(struct mmc_card *card)
{
- struct mmc_blk_data *md, *part_md;
+ struct mmc_blk_data *md;
int ret = 0;
/*
@@ -2894,7 +3272,8 @@ static int mmc_blk_probe(struct mmc_card *card)
mmc_fixup_device(card, mmc_blk_fixups);
card->complete_wq = alloc_workqueue("mmc_complete",
- WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_PERCPU,
+ 0);
if (!card->complete_wq) {
pr_err("Failed to create mmc completion workqueue");
return -ENOMEM;
@@ -2910,18 +3289,6 @@ static int mmc_blk_probe(struct mmc_card *card)
if (ret)
goto out;
- dev_set_drvdata(&card->dev, md);
-
- ret = mmc_add_disk(md);
- if (ret)
- goto out;
-
- list_for_each_entry(part_md, &md->part, part) {
- ret = mmc_add_disk(part_md);
- if (ret)
- goto out;
- }
-
/* Add two debugfs entries */
mmc_blk_add_debugfs(card, md);
@@ -2932,11 +3299,13 @@ static int mmc_blk_probe(struct mmc_card *card)
* Don't enable runtime PM for SD-combo cards here. Leave that
* decision to be taken during the SDIO init sequence instead.
*/
- if (card->type != MMC_TYPE_SD_COMBO) {
+ if (!mmc_card_sd_combo(card)) {
pm_runtime_set_active(&card->dev);
pm_runtime_enable(&card->dev);
}
+ mmc_blk_rpmb_add(card);
+
return 0;
out:
@@ -2959,11 +3328,10 @@ static void mmc_blk_remove(struct mmc_card *card)
mmc_blk_part_switch(card, md->part_type);
mmc_release_host(card->host);
}
- if (card->type != MMC_TYPE_SD_COMBO)
+ if (!mmc_card_sd_combo(card))
pm_runtime_disable(&card->dev);
pm_runtime_put_noidle(&card->dev);
mmc_blk_remove_req(md);
- dev_set_drvdata(&card->dev, NULL);
destroy_workqueue(card->complete_wq);
}
@@ -3078,4 +3446,3 @@ module_exit(mmc_blk_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
-
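
The ida_simple_get()/ida_simple_remove() pairs replaced above and the ida_alloc_max()/ida_free() pairs that take their place cover the same ID range, but express the bound differently: ida_simple_get() takes an exclusive end while ida_alloc_max() takes an inclusive maximum, hence the "max_devices - 1". A minimal sketch of the new pattern (the example_* names are illustrative only, not part of the patch):

  #include <linux/idr.h>

  static DEFINE_IDA(example_ida);

  static int example_get_minor(unsigned int max_devices)
  {
          /* Allocates an ID in [0, max_devices - 1], the same range the
           * old ida_simple_get(&ida, 0, max_devices, GFP_KERNEL) covered. */
          return ida_alloc_max(&example_ida, max_devices - 1, GFP_KERNEL);
  }

  static void example_put_minor(int id)
  {
          ida_free(&example_ida, id);
  }
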
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 4383c262b3f5..ec4f3462bf80 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -15,9 +15,11 @@
#include <linux/stat.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
#include "core.h"
#include "card.h"
@@ -34,13 +36,13 @@ static ssize_t type_show(struct device *dev,
switch (card->type) {
case MMC_TYPE_MMC:
- return sprintf(buf, "MMC\n");
+ return sysfs_emit(buf, "MMC\n");
case MMC_TYPE_SD:
- return sprintf(buf, "SD\n");
+ return sysfs_emit(buf, "SD\n");
case MMC_TYPE_SDIO:
- return sprintf(buf, "SDIO\n");
+ return sysfs_emit(buf, "SDIO\n");
case MMC_TYPE_SD_COMBO:
- return sprintf(buf, "SDcombo\n");
+ return sysfs_emit(buf, "SDcombo\n");
default:
return -EFAULT;
}
@@ -53,20 +55,10 @@ static struct attribute *mmc_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(mmc_dev);
-/*
- * This currently matches any MMC driver to any MMC card - drivers
- * themselves make the decision whether to drive this card in their
- * probe method.
- */
-static int mmc_bus_match(struct device *dev, struct device_driver *drv)
-{
- return 1;
-}
-
static int
-mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+mmc_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct mmc_card *card = mmc_dev_to_card(dev);
+ const struct mmc_card *card = mmc_dev_to_card(dev);
const char *type;
unsigned int i;
int retval = 0;
@@ -94,7 +86,7 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
return retval;
}
- if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
+ if (mmc_card_sdio(card) || mmc_card_sd_combo(card)) {
retval = add_uevent_var(env, "SDIO_ID=%04X:%04X",
card->cis.vendor, card->cis.device);
if (retval)
@@ -116,7 +108,7 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
* SDIO (non-combo) cards are not handled by the mmc_block driver and do not
* have an accessible CID register, which is used by the mmc_card_name() function.
*/
- if (card->type == MMC_TYPE_SDIO)
+ if (mmc_card_sdio(card))
return 0;
retval = add_uevent_var(env, "MMC_NAME=%s", mmc_card_name(card));
@@ -140,14 +132,12 @@ static int mmc_bus_probe(struct device *dev)
return drv->probe(card);
}
-static int mmc_bus_remove(struct device *dev)
+static void mmc_bus_remove(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
struct mmc_card *card = mmc_dev_to_card(dev);
drv->remove(card);
-
- return 0;
}
static void mmc_bus_shutdown(struct device *dev)
@@ -160,6 +150,8 @@ static void mmc_bus_shutdown(struct device *dev)
if (dev->driver && drv->shutdown)
drv->shutdown(card);
+ __mmc_stop_host(host);
+
if (host->bus_ops->shutdown) {
ret = host->bus_ops->shutdown(host);
if (ret)
@@ -225,10 +217,9 @@ static const struct dev_pm_ops mmc_bus_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume)
};
-static struct bus_type mmc_bus_type = {
+static const struct bus_type mmc_bus_type = {
.name = "mmc",
.dev_groups = mmc_dev_groups,
- .match = mmc_bus_match,
.uevent = mmc_bus_uevent,
.probe = mmc_bus_probe,
.remove = mmc_bus_remove,
@@ -284,7 +275,7 @@ static void mmc_release_card(struct device *dev)
/*
* Allocate and initialise a new MMC card structure.
*/
-struct mmc_card *mmc_alloc_card(struct mmc_host *host, struct device_type *type)
+struct mmc_card *mmc_alloc_card(struct mmc_host *host, const struct device_type *type)
{
struct mmc_card *card;
@@ -311,6 +302,7 @@ int mmc_add_card(struct mmc_card *card)
{
int ret;
const char *type;
+ const char *speed_mode = "";
const char *uhs_bus_speed_mode = "";
static const char *const uhs_speeds[] = {
[UHS_SDR12_BUS_SPEED] = "SDR12 ",
@@ -322,6 +314,9 @@ int mmc_add_card(struct mmc_card *card)
dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca);
+ dev_set_removable(&card->dev,
+ mmc_card_is_removable(card->host) ?
+ DEVICE_REMOVABLE : DEVICE_FIXED);
switch (card->type) {
case MMC_TYPE_MMC:
@@ -330,7 +325,9 @@ int mmc_add_card(struct mmc_card *card)
case MMC_TYPE_SD:
type = "SD";
if (mmc_card_blockaddr(card)) {
- if (mmc_card_ext_capacity(card))
+ if (mmc_card_ult_capacity(card))
+ type = "SDUC";
+ else if (mmc_card_ext_capacity(card))
type = "SDXC";
else
type = "SDHC";
@@ -349,31 +346,34 @@ int mmc_add_card(struct mmc_card *card)
break;
}
+ if (mmc_card_hs(card))
+ speed_mode = "high speed ";
+ else if (mmc_card_uhs(card))
+ speed_mode = "UHS-I speed ";
+ else if (mmc_card_uhs2(card->host))
+ speed_mode = "UHS-II speed ";
+ else if (mmc_card_ddr52(card))
+ speed_mode = "high speed DDR ";
+ else if (mmc_card_hs200(card))
+ speed_mode = "HS200 ";
+ else if (mmc_card_hs400es(card))
+ speed_mode = "HS400 Enhanced strobe ";
+ else if (mmc_card_hs400(card))
+ speed_mode = "HS400 ";
+
if (mmc_card_uhs(card) &&
(card->sd_bus_speed < ARRAY_SIZE(uhs_speeds)))
uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed];
- if (mmc_host_is_spi(card->host)) {
- pr_info("%s: new %s%s%s card on SPI\n",
- mmc_hostname(card->host),
- mmc_card_hs(card) ? "high speed " : "",
- mmc_card_ddr52(card) ? "DDR " : "",
- type);
- } else {
- pr_info("%s: new %s%s%s%s%s%s card at address %04x\n",
- mmc_hostname(card->host),
- mmc_card_uhs(card) ? "ultra high speed " :
- (mmc_card_hs(card) ? "high speed " : ""),
- mmc_card_hs400(card) ? "HS400 " :
- (mmc_card_hs200(card) ? "HS200 " : ""),
- mmc_card_hs400es(card) ? "Enhanced strobe " : "",
- mmc_card_ddr52(card) ? "DDR " : "",
+ if (mmc_host_is_spi(card->host))
+ pr_info("%s: new %s%s card on SPI\n",
+ mmc_hostname(card->host), speed_mode, type);
+ else
+ pr_info("%s: new %s%s%s card at address %04x\n",
+ mmc_hostname(card->host), speed_mode,
uhs_bus_speed_mode, type, card->rca);
- }
-#ifdef CONFIG_DEBUG_FS
mmc_add_card_debugfs(card);
-#endif
card->dev.of_node = mmc_of_find_child_device(card->host, 0);
device_enable_async_suspend(&card->dev);
@@ -384,6 +384,14 @@ int mmc_add_card(struct mmc_card *card)
mmc_card_set_present(card);
+ /*
+ * Register for undervoltage notification if the card supports
+ * power-off notification, enabling emergency shutdowns.
+ */
+ if (mmc_card_mmc(card) &&
+ card->ext_csd.power_off_notification == EXT_CSD_POWER_ON)
+ mmc_regulator_register_undervoltage_notifier(card->host);
+
return 0;
}
@@ -395,9 +403,10 @@ void mmc_remove_card(struct mmc_card *card)
{
struct mmc_host *host = card->host;
-#ifdef CONFIG_DEBUG_FS
+ if (mmc_card_present(card))
+ mmc_regulator_unregister_undervoltage_notifier(host);
+
mmc_remove_card_debugfs(card);
-#endif
if (mmc_card_present(card)) {
if (mmc_host_is_spi(card->host)) {
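
The sprintf() to sysfs_emit() conversion above keeps every sysfs show() callback within a single page, which sprintf() cannot guarantee. A minimal sketch of the same pattern in a hypothetical attribute (example_show() is not part of the patch):

  #include <linux/device.h>
  #include <linux/sysfs.h>

  static ssize_t example_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
  {
          /* sysfs_emit() refuses to write past PAGE_SIZE. */
          return sysfs_emit(buf, "%s\n", dev_name(dev));
  }
  static DEVICE_ATTR_RO(example);
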
diff --git a/drivers/mmc/core/bus.h b/drivers/mmc/core/bus.h
index 8105852c4b62..8b69624fa46e 100644
--- a/drivers/mmc/core/bus.h
+++ b/drivers/mmc/core/bus.h
@@ -9,6 +9,7 @@
#define _MMC_CORE_BUS_H
#include <linux/device.h>
+#include <linux/sysfs.h>
struct mmc_host;
struct mmc_card;
@@ -17,12 +18,12 @@ struct mmc_card;
static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct mmc_card *card = mmc_dev_to_card(dev); \
- return sprintf(buf, fmt, args); \
+ return sysfs_emit(buf, fmt, args); \
} \
-static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
+static DEVICE_ATTR(name, 0444, mmc_##name##_show, NULL)
struct mmc_card *mmc_alloc_card(struct mmc_host *host,
- struct device_type *type);
+ const struct device_type *type);
int mmc_add_card(struct mmc_card *card);
void mmc_remove_card(struct mmc_card *card);
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index 7bd392d55cfa..1200951bab08 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -23,6 +23,7 @@
#define MMC_CARD_SDXC (1<<3) /* card is SDXC */
#define MMC_CARD_REMOVED (1<<4) /* card has been removed */
#define MMC_STATE_SUSPENDED (1<<5) /* card is suspended */
+#define MMC_CARD_SDUC (1<<6) /* card is SDUC */
#define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT)
#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
@@ -30,11 +31,13 @@
#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED))
#define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED)
+#define mmc_card_ult_capacity(c) ((c)->state & MMC_CARD_SDUC)
#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
+#define mmc_card_set_ult_capacity(c) ((c)->state |= MMC_CARD_SDUC)
#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
#define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED)
#define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED)
@@ -53,41 +56,57 @@ struct mmc_fixup {
unsigned int manfid;
unsigned short oemid;
+ /* Manufacturing date */
+ unsigned short year;
+ unsigned char month;
+
/* SDIO-specific fields. You can use SDIO_ANY_ID here of course */
u16 cis_vendor, cis_device;
/* for MMC cards */
unsigned int ext_csd_rev;
+ /* Match against functions declared in device tree */
+ const char *of_compatible;
+
void (*vendor_fixup)(struct mmc_card *card, int data);
int data;
};
#define CID_MANFID_ANY (-1u)
#define CID_OEMID_ANY ((unsigned short) -1)
+#define CID_YEAR_ANY ((unsigned short) -1)
+#define CID_MONTH_ANY ((unsigned char) -1)
#define CID_NAME_ANY (NULL)
#define EXT_CSD_REV_ANY (-1u)
#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_SANDISK_SD 0x3
#define CID_MANFID_ATP 0x9
#define CID_MANFID_TOSHIBA 0x11
+#define CID_MANFID_GIGASTONE 0x12
#define CID_MANFID_MICRON 0x13
#define CID_MANFID_SAMSUNG 0x15
#define CID_MANFID_APACER 0x27
+#define CID_MANFID_SWISSBIT 0x5D
#define CID_MANFID_KINGSTON 0x70
#define CID_MANFID_HYNIX 0x90
+#define CID_MANFID_KINGSTON_SD 0x9F
#define CID_MANFID_NUMONYX 0xFE
#define END_FIXUP { NULL }
-#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \
- _cis_vendor, _cis_device, \
- _fixup, _data, _ext_csd_rev) \
+#define _FIXUP_EXT(_name, _manfid, _oemid, _year, _month, \
+ _rev_start, _rev_end, \
+ _cis_vendor, _cis_device, \
+ _fixup, _data, _ext_csd_rev) \
{ \
.name = (_name), \
.manfid = (_manfid), \
.oemid = (_oemid), \
+ .year = (_year), \
+ .month = (_month), \
.rev_start = (_rev_start), \
.rev_end = (_rev_end), \
.cis_vendor = (_cis_vendor), \
@@ -99,8 +118,8 @@ struct mmc_fixup {
#define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \
_fixup, _data, _ext_csd_rev) \
- _FIXUP_EXT(_name, _manfid, \
- _oemid, _rev_start, _rev_end, \
+ _FIXUP_EXT(_name, _manfid, _oemid, CID_YEAR_ANY, CID_MONTH_ANY, \
+ _rev_start, _rev_end, \
SDIO_ANY_ID, SDIO_ANY_ID, \
_fixup, _data, _ext_csd_rev) \
@@ -114,11 +133,27 @@ struct mmc_fixup {
_ext_csd_rev)
#define SDIO_FIXUP(_vendor, _device, _fixup, _data) \
- _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \
- CID_OEMID_ANY, 0, -1ull, \
+ _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, CID_OEMID_ANY, \
+ CID_YEAR_ANY, CID_MONTH_ANY, \
+ 0, -1ull, \
_vendor, _device, \
_fixup, _data, EXT_CSD_REV_ANY) \
+#define SDIO_FIXUP_COMPATIBLE(_compatible, _fixup, _data) \
+ { \
+ .name = CID_NAME_ANY, \
+ .manfid = CID_MANFID_ANY, \
+ .oemid = CID_OEMID_ANY, \
+ .rev_start = 0, \
+ .rev_end = -1ull, \
+ .cis_vendor = SDIO_ANY_ID, \
+ .cis_device = SDIO_ANY_ID, \
+ .vendor_fixup = (_fixup), \
+ .data = (_data), \
+ .ext_csd_rev = EXT_CSD_REV_ANY, \
+ .of_compatible = _compatible, \
+ }
+
#define cid_rev(hwrev, fwrev, year, month) \
(((u64) hwrev) << 40 | \
((u64) fwrev) << 32 | \
@@ -150,6 +185,24 @@ static inline void __maybe_unused add_limit_rate_quirk(struct mmc_card *card,
card->quirk_max_rate = data;
}
+static inline void __maybe_unused wl1251_quirk(struct mmc_card *card,
+ int data)
+{
+ /*
+ * We have TI wl1251 attached to this mmc. Pass this
+ * information to the SDIO core because it can't be
+ * probed by normal methods.
+ */
+
+ dev_info(card->host->parent, "found wl1251\n");
+ card->quirks |= MMC_QUIRK_NONSTD_SDIO;
+ card->cccr.wide_bus = 1;
+ card->cis.vendor = 0x104c;
+ card->cis.device = 0x9066;
+ card->cis.blksize = 512;
+ card->cis.max_dtr = 24000000;
+}
+
/*
* Quirk add/remove for MMC products.
*/
@@ -192,14 +245,19 @@ static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c)
return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
}
+static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF;
+}
+
static inline int mmc_card_disable_cd(const struct mmc_card *c)
{
return c->quirks & MMC_QUIRK_DISABLE_CD;
}
-static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c)
+static inline int mmc_card_blk_no_cmd23(const struct mmc_card *c)
{
- return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF;
+ return c->quirks & MMC_QUIRK_BLK_NO_CMD23;
}
static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
@@ -222,4 +280,29 @@ static inline int mmc_card_broken_hpi(const struct mmc_card *c)
return c->quirks & MMC_QUIRK_BROKEN_HPI;
}
+static inline int mmc_card_broken_sd_discard(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_SD_DISCARD;
+}
+
+static inline int mmc_card_broken_sd_cache(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_SD_CACHE;
+}
+
+static inline int mmc_card_broken_cache_flush(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_CACHE_FLUSH;
+}
+
+static inline int mmc_card_broken_sd_poweroff_notify(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY;
+}
+
+static inline int mmc_card_no_uhs_ddr50_tuning(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_NO_UHS_DDR50_TUNING;
+}
+
#endif
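
The new SDIO_FIXUP_COMPATIBLE() macro above matches a quirk against a compatible string declared in the device tree (the of_compatible field added above) rather than against CIS vendor/device IDs, which is what makes the wl1251 workaround possible since that chip cannot be identified through normal CIS reads. A hypothetical table entry using it might look like this (the real table lives in quirks.h and is not shown in this diff):

  static const struct mmc_fixup __maybe_unused example_sdio_fixups[] = {
          SDIO_FIXUP_COMPATIBLE("ti,wl1251", wl1251_quirk, 0),
          END_FIXUP
  };
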
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 95fedcf56e4a..860378bea557 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -19,7 +19,6 @@
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
-#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
@@ -56,7 +55,7 @@ static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
/*
* Enabling software CRCs on the data blocks can be a significant (30%)
* performance cost, and for other reasons may not always be desired.
- * So we allow it it to be disabled.
+ * So we allow it to be disabled.
*/
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
@@ -97,8 +96,8 @@ static void mmc_should_fail_request(struct mmc_host *host,
!should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
return;
- data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
- data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
+ data->error = data_errors[get_random_u32_below(ARRAY_SIZE(data_errors))];
+ data->bytes_xfered = get_random_u32_below(data->bytes_xfered >> 9) << 9;
}
#else /* CONFIG_FAIL_MMC_REQUEST */
@@ -142,8 +141,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
int err = cmd->error;
/* Flag re-tuning needed on CRC errors */
- if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
- cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
+ if (!mmc_op_tuning(cmd->opcode) &&
!host->retune_crc_disable &&
(err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
(mrq->data && mrq->data->error == -EILSEQ) ||
@@ -337,6 +335,9 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
+ if (mrq->cmd->has_ext_addr)
+ mmc_send_ext_addr(host, mrq->cmd->ext_addr);
+
init_completion(&mrq->cmd_completion);
mmc_retune_hold(host);
@@ -352,6 +353,9 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
if (err)
return err;
+ if (host->uhs2_sd_tran)
+ mmc_uhs2_prepare_cmd(host, mrq);
+
led_trigger_event(host->led, LED_FULL);
__mmc_start_request(host, mrq);
@@ -451,6 +455,9 @@ int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
if (err)
goto out_err;
+ if (host->uhs2_sd_tran)
+ mmc_uhs2_prepare_cmd(host, mrq);
+
err = host->cqe_ops->cqe_request(host, mrq);
if (err)
goto out_err;
@@ -527,7 +534,7 @@ EXPORT_SYMBOL(mmc_cqe_post_req);
* mmc_cqe_recovery - Recover from CQE errors.
* @host: MMC host to recover
*
- * Recovery consists of stopping CQE, stopping eMMC, discarding the queue in
+ * Recovery consists of stopping CQE, stopping eMMC, discarding the queue
* in eMMC, and discarding the queue in CQE. CQE must call
* mmc_cqe_request_done() on all requests. An error is returned if the eMMC
* fails to discard its queue.
@@ -549,21 +556,24 @@ int mmc_cqe_recovery(struct mmc_host *host)
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = MMC_STOP_TRANSMISSION;
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.flags = MMC_RSP_R1B_NO_CRC | MMC_CMD_AC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
- mmc_wait_for_cmd(host, &cmd, 0);
+ mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+
+ mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO);
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = MMC_CMDQ_TASK_MGMT;
cmd.arg = 1; /* Discard entire queue */
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.flags = MMC_RSP_R1B_NO_CRC | MMC_CMD_AC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
- err = mmc_wait_for_cmd(host, &cmd, 0);
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
host->cqe_ops->cqe_recovery_finish(host);
+ if (err)
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+
mmc_retune_release(host);
return err;
@@ -872,7 +882,6 @@ void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
WARN_ON(ctx && host->claimer != ctx);
mmc_release_host(host);
- pm_runtime_mark_last_busy(&card->dev);
pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);
@@ -936,14 +945,17 @@ int mmc_execute_tuning(struct mmc_card *card)
opcode = MMC_SEND_TUNING_BLOCK;
err = host->ops->execute_tuning(host, opcode);
+ if (!err) {
+ mmc_retune_clear(host);
+ mmc_retune_enable(host);
+ return 0;
+ }
- if (err) {
+ /* Only print error when we don't check for card removal */
+ if (!host->detect_change) {
pr_err("%s: tuning execution failed: %d\n",
mmc_hostname(host), err);
- } else {
- host->retune_now = 0;
- host->need_retune = 0;
- mmc_retune_enable(host);
+ mmc_debugfs_err_stats_inc(host, MMC_ERR_TUNING);
}
return err;
@@ -1125,13 +1137,19 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
return 0;
}
- if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
+ if (!mmc_card_uhs2(host) && host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
bit = ffs(ocr) - 1;
ocr &= 3 << bit;
mmc_power_cycle(host, ocr);
} else {
bit = fls(ocr) - 1;
- ocr &= 3 << bit;
+ /*
+ * The bit variable represents the highest voltage bit set in
+ * the OCR register.
+ * To keep a range of 2 values (e.g. 3.2V/3.3V and 3.3V/3.4V),
+ * we must shift the mask '3' with (bit - 1).
+ */
+ ocr &= 3 << (bit - 1);
if (bit != host->ios.vdd)
dev_warn(mmc_dev(host), "exceeding card's volts\n");
}
@@ -1380,6 +1398,29 @@ void mmc_power_cycle(struct mmc_host *host, u32 ocr)
mmc_power_up(host, ocr);
}
+/**
+ * mmc_handle_undervoltage - Handle an undervoltage event on the MMC bus
+ * @host: The MMC host that detected the undervoltage condition
+ *
+ * This function is called when an undervoltage event is detected on one of
+ * the MMC regulators.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int mmc_handle_undervoltage(struct mmc_host *host)
+{
+ /* Stop the host to prevent races with card removal */
+ __mmc_stop_host(host);
+
+ if (!host->bus_ops || !host->bus_ops->handle_undervoltage)
+ return 0;
+
+ dev_warn(mmc_dev(host), "%s: Undervoltage detected, initiating emergency stop\n",
+ mmc_hostname(host));
+
+ return host->bus_ops->handle_undervoltage(host);
+}
+
/*
* Assign a mmc bus handler to a host. Only one bus handler may control a
* host at any given time.
@@ -1475,6 +1516,11 @@ void mmc_init_erase(struct mmc_card *card)
card->pref_erase = 0;
}
+static bool is_trim_arg(unsigned int arg)
+{
+ return (arg & MMC_TRIM_OR_DISCARD_ARGS) && arg != MMC_DISCARD_ARG;
+}
+
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
unsigned int arg, unsigned int qty)
{
@@ -1580,8 +1626,8 @@ static unsigned int mmc_erase_timeout(struct mmc_card *card,
return mmc_mmc_erase_timeout(card, arg, qty);
}
-static int mmc_do_erase(struct mmc_card *card, unsigned int from,
- unsigned int to, unsigned int arg)
+static int mmc_do_erase(struct mmc_card *card, sector_t from,
+ sector_t to, unsigned int arg)
{
struct mmc_command cmd = {};
unsigned int qty = 0, busy_timeout = 0;
@@ -1612,8 +1658,8 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
else if (mmc_card_sd(card))
qty += to - from + 1;
else
- qty += ((to / card->erase_size) -
- (from / card->erase_size)) + 1;
+ qty += (mmc_sector_div(to, card->erase_size) -
+ mmc_sector_div(from, card->erase_size)) + 1;
if (!mmc_card_blockaddr(card)) {
from <<= 9;
@@ -1626,6 +1672,12 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
cmd.opcode = MMC_ERASE_GROUP_START;
cmd.arg = from;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+
+ if (mmc_card_ult_capacity(card)) {
+ cmd.ext_addr = from >> 32;
+ cmd.has_ext_addr = true;
+ }
+
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
pr_err("mmc_erase: group start error %d, "
@@ -1641,6 +1693,12 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
cmd.opcode = MMC_ERASE_GROUP_END;
cmd.arg = to;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+
+ if (mmc_card_ult_capacity(card)) {
+ cmd.ext_addr = to >> 32;
+ cmd.has_ext_addr = true;
+ }
+
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
pr_err("mmc_erase: group end error %d, status %#x\n",
@@ -1682,18 +1740,19 @@ out:
}
static unsigned int mmc_align_erase_size(struct mmc_card *card,
- unsigned int *from,
- unsigned int *to,
+ sector_t *from,
+ sector_t *to,
unsigned int nr)
{
- unsigned int from_new = *from, nr_new = nr, rem;
+ sector_t from_new = *from;
+ unsigned int nr_new = nr, rem;
/*
* When the 'card->erase_size' is power of 2, we can use round_up/down()
* to align the erase size efficiently.
*/
if (is_power_of_2(card->erase_size)) {
- unsigned int temp = from_new;
+ sector_t temp = from_new;
from_new = round_up(temp, card->erase_size);
rem = from_new - temp;
@@ -1705,7 +1764,7 @@ static unsigned int mmc_align_erase_size(struct mmc_card *card,
nr_new = round_down(nr_new, card->erase_size);
} else {
- rem = from_new % card->erase_size;
+ rem = mmc_sector_mod(from_new, card->erase_size);
if (rem) {
rem = card->erase_size - rem;
from_new += rem;
@@ -1738,10 +1797,12 @@ static unsigned int mmc_align_erase_size(struct mmc_card *card,
*
* Caller must claim host before calling this function.
*/
-int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
+int mmc_erase(struct mmc_card *card, sector_t from, unsigned int nr,
unsigned int arg)
{
- unsigned int rem, to = from + nr;
+ unsigned int rem;
+ sector_t to = from + nr;
+
int err;
if (!(card->csd.cmdclass & CCC_ERASE))
@@ -1757,12 +1818,12 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
!(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
return -EOPNOTSUPP;
- if (mmc_card_mmc(card) && (arg & MMC_TRIM_ARGS) &&
+ if (mmc_card_mmc(card) && is_trim_arg(arg) &&
!(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
return -EOPNOTSUPP;
if (arg == MMC_SECURE_ERASE_ARG) {
- if (from % card->erase_size || nr % card->erase_size)
+ if (mmc_sector_mod(from, card->erase_size) || nr % card->erase_size)
return -EINVAL;
}
@@ -1786,8 +1847,8 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
* and call mmc_do_erase() twice if necessary. This special case is
* identified by the card->eg_boundary flag.
*/
- rem = card->erase_size - (from % card->erase_size);
- if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
+ rem = card->erase_size - mmc_sector_mod(from, card->erase_size);
+ if ((arg & MMC_TRIM_OR_DISCARD_ARGS) && card->eg_boundary && nr > rem) {
err = mmc_do_erase(card, from, from + rem - 1, arg);
from += rem;
if ((err) || (to <= from))
@@ -1798,59 +1859,60 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
}
EXPORT_SYMBOL(mmc_erase);
-int mmc_can_erase(struct mmc_card *card)
+bool mmc_card_can_erase(struct mmc_card *card)
{
- if (card->csd.cmdclass & CCC_ERASE && card->erase_size)
- return 1;
- return 0;
+ return (card->csd.cmdclass & CCC_ERASE && card->erase_size);
}
-EXPORT_SYMBOL(mmc_can_erase);
+EXPORT_SYMBOL(mmc_card_can_erase);
-int mmc_can_trim(struct mmc_card *card)
+bool mmc_card_can_trim(struct mmc_card *card)
{
- if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
- (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
- return 1;
- return 0;
+ return ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
+ (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)));
}
-EXPORT_SYMBOL(mmc_can_trim);
+EXPORT_SYMBOL(mmc_card_can_trim);
-int mmc_can_discard(struct mmc_card *card)
+bool mmc_card_can_discard(struct mmc_card *card)
{
/*
* As there's no way to detect the discard support bit at v4.5
* use the s/w feature support field.
*/
- if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
- return 1;
- return 0;
+ return (card->ext_csd.feature_support & MMC_DISCARD_FEATURE);
}
-EXPORT_SYMBOL(mmc_can_discard);
+EXPORT_SYMBOL(mmc_card_can_discard);
-int mmc_can_sanitize(struct mmc_card *card)
+bool mmc_card_can_sanitize(struct mmc_card *card)
{
- if (!mmc_can_trim(card) && !mmc_can_erase(card))
- return 0;
+ if (!mmc_card_can_trim(card) && !mmc_card_can_erase(card))
+ return false;
if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
- return 1;
- return 0;
+ return true;
+ return false;
}
-int mmc_can_secure_erase_trim(struct mmc_card *card)
+bool mmc_card_can_secure_erase_trim(struct mmc_card *card)
{
- if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
- !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
- return 1;
- return 0;
+ return ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
+ !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN));
}
-EXPORT_SYMBOL(mmc_can_secure_erase_trim);
+EXPORT_SYMBOL(mmc_card_can_secure_erase_trim);
-int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
+bool mmc_card_can_cmd23(struct mmc_card *card)
+{
+ return ((mmc_card_mmc(card) &&
+ card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
+ (mmc_card_sd(card) && !mmc_card_ult_capacity(card) &&
+ card->scr.cmds & SD_SCR_CMD23_SUPPORT));
+}
+EXPORT_SYMBOL(mmc_card_can_cmd23);
+
+int mmc_erase_group_aligned(struct mmc_card *card, sector_t from,
unsigned int nr)
{
if (!card->erase_size)
return 0;
- if (from % card->erase_size || nr % card->erase_size)
+ if (mmc_sector_mod(from, card->erase_size) || nr % card->erase_size)
return 0;
return 1;
}
@@ -1948,7 +2010,7 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
return card->pref_erase;
max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
- if (mmc_can_trim(card)) {
+ if (mmc_card_can_trim(card)) {
max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
if (max_trim < max_discard || max_discard == 0)
max_discard = max_trim;
@@ -1987,14 +2049,14 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
{
mmc_pwrseq_reset(host);
- if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
+ if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->card_hw_reset)
return;
- host->ops->hw_reset(host);
+ host->ops->card_hw_reset(host);
}
/**
* mmc_hw_reset - reset the card in hardware
- * @host: MMC host to which the card is attached
+ * @card: card to be reset
*
* Hard reset the card. This function is only for upper layers, like the
* block layer or card drivers. You cannot use it in host drivers (struct
@@ -2002,8 +2064,9 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
*
* Return: 0 on success, -errno on failure
*/
-int mmc_hw_reset(struct mmc_host *host)
+int mmc_hw_reset(struct mmc_card *card)
{
+ struct mmc_host *host = card->host;
int ret;
ret = host->bus_ops->hw_reset(host);
@@ -2015,8 +2078,9 @@ int mmc_hw_reset(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_hw_reset);
-int mmc_sw_reset(struct mmc_host *host)
+int mmc_sw_reset(struct mmc_card *card)
{
+ struct mmc_host *host = card->host;
int ret;
if (!host->bus_ops->sw_reset)
@@ -2149,6 +2213,41 @@ int mmc_detect_card_removed(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_detect_card_removed);
+int mmc_card_alternative_gpt_sector(struct mmc_card *card, sector_t *gpt_sector)
+{
+ unsigned int boot_sectors_num;
+
+ if ((!(card->host->caps2 & MMC_CAP2_ALT_GPT_TEGRA)))
+ return -EOPNOTSUPP;
+
+ /* filter out unrelated cards */
+ if (card->ext_csd.rev < 3 ||
+ !mmc_card_mmc(card) ||
+ !mmc_card_is_blockaddr(card) ||
+ mmc_card_is_removable(card->host))
+ return -ENOENT;
+
+ /*
+ * eMMC storage has two special boot partitions in addition to the
+ * main one. NVIDIA's bootloader linearizes eMMC boot0->boot1->main
+ * accesses, this means that the partition table addresses are shifted
+ * by the size of boot partitions. In accordance with the eMMC
+ * specification, the boot partition size is calculated as follows:
+ *
+ * boot partition size = 128K byte x BOOT_SIZE_MULT
+ *
+ * Calculate number of sectors occupied by the both boot partitions.
+ */
+ boot_sectors_num = card->ext_csd.raw_boot_mult * SZ_128K /
+ SZ_512 * MMC_NUM_BOOT_PARTITION;
+
+ /* Defined by NVIDIA and used by Android devices. */
+ *gpt_sector = card->ext_csd.sectors - boot_sectors_num - 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_card_alternative_gpt_sector);
+
void mmc_rescan(struct work_struct *work)
{
struct mmc_host *host =
@@ -2194,6 +2293,18 @@ void mmc_rescan(struct work_struct *work)
goto out;
}
+ /*
+ * Ideally we should favor initialization of legacy SD cards and defer
+ * UHS-II enumeration. However, it seems like cards don't reliably
+ * announce their support for UHS-II in the response to the ACMD41,
+ * while initializing the legacy SD interface. Therefore, let's start
+ * with UHS-II for now.
+ */
+ if (!mmc_attach_sd_uhs2(host)) {
+ mmc_release_host(host);
+ goto out;
+ }
+
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
unsigned int freq = freqs[i];
if (freq > host->f_max) {
@@ -2206,6 +2317,17 @@ void mmc_rescan(struct work_struct *work)
if (freqs[i] <= host->f_min)
break;
}
+
+ /* A non-removable card should have been detected by now. */
+ if (!mmc_card_is_removable(host) && !host->bus_ops)
+ pr_info("%s: Failed to initialize a non-removable card",
+ mmc_hostname(host));
+
+ /*
+ * Ignore the command timeout errors observed during
+ * the card init as those are excepted.
+ */
+ host->err_stats[MMC_ERR_CMD_TIMEOUT] = 0;
mmc_release_host(host);
out:
@@ -2215,10 +2337,13 @@ void mmc_rescan(struct work_struct *work)
void mmc_start_host(struct mmc_host *host)
{
+ bool power_up = !(host->caps2 &
+ (MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_SD_UHS2));
+
host->f_init = max(min(freqs[0], host->f_max), host->f_min);
host->rescan_disable = 0;
- if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
+ if (power_up) {
mmc_claim_host(host);
mmc_power_up(host, host->ocr_avail);
mmc_release_host(host);
@@ -2228,8 +2353,11 @@ void mmc_start_host(struct mmc_host *host)
_mmc_detect_change(host, 0, false);
}
-void mmc_stop_host(struct mmc_host *host)
+void __mmc_stop_host(struct mmc_host *host)
{
+ if (host->rescan_disable)
+ return;
+
if (host->slot.cd_irq >= 0) {
mmc_gpio_set_cd_wake(host, false);
disable_irq(host->slot.cd_irq);
@@ -2237,6 +2365,11 @@ void mmc_stop_host(struct mmc_host *host)
host->rescan_disable = 1;
cancel_delayed_work_sync(&host->detect);
+}
+
+void mmc_stop_host(struct mmc_host *host)
+{
+ __mmc_stop_host(host);
/* clear pm flags now and let card drivers set them as needed */
host->pm_flags = 0;
@@ -2291,4 +2424,5 @@ static void __exit mmc_exit(void)
subsys_initcall(mmc_init);
module_exit(mmc_exit);
+MODULE_DESCRIPTION("MMC core driver");
MODULE_LICENSE("GPL");
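
The sector_t conversions above allow erase ranges beyond 32 bits for SDUC cards: the low 32 bits still travel in the regular command argument while the upper bits go into the new ext_addr field, which mmc_start_request() forwards via mmc_send_ext_addr() before issuing the command. A minimal sketch of that split (example_set_erase_addr() is a hypothetical helper, not part of the patch):

  static void example_set_erase_addr(struct mmc_command *cmd, sector_t sector)
  {
          cmd->arg = lower_32_bits(sector);       /* classic 32-bit argument */
          cmd->ext_addr = upper_32_bits(sector);  /* extended address bits */
          cmd->has_ext_addr = true;
  }

For the alternative-GPT helper, the arithmetic works out as follows for a typical BOOT_SIZE_MULT of 32: each boot partition is 32 * 128 KiB = 4 MiB, i.e. 8192 sectors, so the two boot partitions shift the table by 16384 sectors and the GPT is expected at ext_csd.sectors - 16384 - 1.
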
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 0c4de2030b3f..a028b48be164 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -31,6 +31,7 @@ struct mmc_bus_ops {
int (*sw_reset)(struct mmc_host *);
bool (*cache_enabled)(struct mmc_host *);
int (*flush_cache)(struct mmc_host *);
+ int (*handle_undervoltage)(struct mmc_host *host);
};
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -59,6 +60,10 @@ void mmc_power_off(struct mmc_host *host);
void mmc_power_cycle(struct mmc_host *host, u32 ocr);
void mmc_set_initial_state(struct mmc_host *host);
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
+int mmc_handle_undervoltage(struct mmc_host *host);
+void mmc_regulator_register_undervoltage_notifier(struct mmc_host *host);
+void mmc_regulator_unregister_undervoltage_notifier(struct mmc_host *host);
+void mmc_undervoltage_workfn(struct work_struct *work);
static inline void mmc_delay(unsigned int ms)
{
@@ -70,6 +75,7 @@ static inline void mmc_delay(unsigned int ms)
void mmc_rescan(struct work_struct *work);
void mmc_start_host(struct mmc_host *host);
+void __mmc_stop_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
@@ -80,16 +86,32 @@ int mmc_detect_card_removed(struct mmc_host *host);
int mmc_attach_mmc(struct mmc_host *host);
int mmc_attach_sd(struct mmc_host *host);
int mmc_attach_sdio(struct mmc_host *host);
+int mmc_attach_sd_uhs2(struct mmc_host *host);
/* Module parameters */
extern bool use_spi_crc;
/* Debugfs information for hosts and cards */
+#ifdef CONFIG_DEBUG_FS
void mmc_add_host_debugfs(struct mmc_host *host);
void mmc_remove_host_debugfs(struct mmc_host *host);
void mmc_add_card_debugfs(struct mmc_card *card);
void mmc_remove_card_debugfs(struct mmc_card *card);
+#else
+static inline void mmc_add_host_debugfs(struct mmc_host *host)
+{
+}
+static inline void mmc_remove_host_debugfs(struct mmc_host *host)
+{
+}
+static inline void mmc_add_card_debugfs(struct mmc_card *card)
+{
+}
+static inline void mmc_remove_card_debugfs(struct mmc_card *card)
+{
+}
+#endif
int mmc_execute_tuning(struct mmc_card *card);
int mmc_hs200_to_hs400(struct mmc_card *card);
@@ -100,15 +122,14 @@ bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq);
-int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
- unsigned int arg);
-int mmc_can_erase(struct mmc_card *card);
-int mmc_can_trim(struct mmc_card *card);
-int mmc_can_discard(struct mmc_card *card);
-int mmc_can_sanitize(struct mmc_card *card);
-int mmc_can_secure_erase_trim(struct mmc_card *card);
-int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
- unsigned int nr);
+int mmc_erase(struct mmc_card *card, sector_t from, unsigned int nr, unsigned int arg);
+bool mmc_card_can_erase(struct mmc_card *card);
+bool mmc_card_can_trim(struct mmc_card *card);
+bool mmc_card_can_discard(struct mmc_card *card);
+bool mmc_card_can_sanitize(struct mmc_card *card);
+bool mmc_card_can_secure_erase_trim(struct mmc_card *card);
+bool mmc_card_can_cmd23(struct mmc_card *card);
+int mmc_erase_group_aligned(struct mmc_card *card, sector_t from, unsigned int nr);
unsigned int mmc_calc_max_discard(struct mmc_card *card);
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
@@ -119,6 +140,8 @@ void mmc_release_host(struct mmc_host *host);
void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx);
void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx);
+int mmc_card_alternative_gpt_sector(struct mmc_card *card, sector_t *sector);
+
/**
* mmc_claim_host - exclusively claim a host
* @host: mmc host to claim
@@ -181,4 +204,14 @@ static inline int mmc_flush_cache(struct mmc_host *host)
return 0;
}
+static inline unsigned int mmc_sector_div(sector_t dividend, u32 divisor)
+{
+ return div_u64(dividend, divisor);
+}
+
+static inline unsigned int mmc_sector_mod(sector_t dividend, u32 divisor)
+{
+ return sector_div(dividend, divisor);
+}
+
#endif
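
mmc_sector_div() and mmc_sector_mod() above wrap div_u64() and sector_div() so callers get 64-by-32-bit division and remainder without caring that sector_div() modifies its first argument in place (the helper takes the dividend by value, so the caller's variable is untouched). A minimal sketch using them (example_round_up() is illustrative only):

  static sector_t example_round_up(sector_t from, u32 erase_size)
  {
          unsigned int rem = mmc_sector_mod(from, erase_size);

          /* Round "from" up to the next erase-group boundary. */
          return rem ? from + (erase_size - rem) : from;
  }
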
diff --git a/drivers/mmc/core/crypto.c b/drivers/mmc/core/crypto.c
index 419a368f8402..fec4fbf16a5b 100644
--- a/drivers/mmc/core/crypto.c
+++ b/drivers/mmc/core/crypto.c
@@ -16,13 +16,13 @@ void mmc_crypto_set_initial_state(struct mmc_host *host)
{
/* Reset might clear all keys, so reprogram all the keys. */
if (host->caps2 & MMC_CAP2_CRYPTO)
- blk_ksm_reprogram_all_keys(&host->ksm);
+ blk_crypto_reprogram_all_keys(&host->crypto_profile);
}
void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host)
{
if (host->caps2 & MMC_CAP2_CRYPTO)
- blk_ksm_register(&host->ksm, q);
+ blk_crypto_register(&host->crypto_profile, q);
}
EXPORT_SYMBOL_GPL(mmc_crypto_setup_queue);
@@ -30,19 +30,15 @@ void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq)
{
struct request *req = mmc_queue_req_to_req(mqrq);
struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct blk_crypto_keyslot *keyslot;
- if (!req->crypt_keyslot)
+ if (!req->crypt_ctx)
return;
- mrq->crypto_enabled = true;
- mrq->crypto_key_slot = blk_ksm_get_slot_idx(req->crypt_keyslot);
+ mrq->crypto_ctx = req->crypt_ctx;
- /*
- * For now we assume that all MMC drivers set max_dun_bytes_supported=4,
- * which is the limit for CQHCI crypto. So all DUNs should be 32-bit.
- */
- WARN_ON_ONCE(req->crypt_ctx->bc_dun[0] > U32_MAX);
-
- mrq->data_unit_num = req->crypt_ctx->bc_dun[0];
+ keyslot = req->crypt_keyslot;
+ if (keyslot)
+ mrq->crypto_key_slot = blk_crypto_keyslot_index(keyslot);
}
EXPORT_SYMBOL_GPL(mmc_crypto_prepare_req);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 3fdbc801e64a..91ea00a0f61d 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -12,9 +12,12 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/fault-inject.h>
+#include <linux/time.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
#include "core.h"
#include "card.h"
@@ -223,6 +226,130 @@ static int mmc_clock_opt_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
"%llu\n");
+static int mmc_err_state_get(void *data, u64 *val)
+{
+ struct mmc_host *host = data;
+ int i;
+
+ if (!host)
+ return -EINVAL;
+
+ *val = 0;
+ for (i = 0; i < MMC_ERR_MAX; i++) {
+ if (host->err_stats[i]) {
+ *val = 1;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_err_state, mmc_err_state_get, NULL, "%llu\n");
+
+static int mmc_err_stats_show(struct seq_file *file, void *data)
+{
+ struct mmc_host *host = file->private;
+ const char *desc[MMC_ERR_MAX] = {
+ [MMC_ERR_CMD_TIMEOUT] = "Command Timeout Occurred",
+ [MMC_ERR_CMD_CRC] = "Command CRC Errors Occurred",
+ [MMC_ERR_DAT_TIMEOUT] = "Data Timeout Occurred",
+ [MMC_ERR_DAT_CRC] = "Data CRC Errors Occurred",
+ [MMC_ERR_AUTO_CMD] = "Auto-Cmd Error Occurred",
+ [MMC_ERR_ADMA] = "ADMA Error Occurred",
+ [MMC_ERR_TUNING] = "Tuning Error Occurred",
+ [MMC_ERR_CMDQ_RED] = "CMDQ RED Errors",
+ [MMC_ERR_CMDQ_GCE] = "CMDQ GCE Errors",
+ [MMC_ERR_CMDQ_ICCE] = "CMDQ ICCE Errors",
+ [MMC_ERR_REQ_TIMEOUT] = "Request Timedout",
+ [MMC_ERR_CMDQ_REQ_TIMEOUT] = "CMDQ Request Timedout",
+ [MMC_ERR_ICE_CFG] = "ICE Config Errors",
+ [MMC_ERR_CTRL_TIMEOUT] = "Controller Timedout errors",
+ [MMC_ERR_UNEXPECTED_IRQ] = "Unexpected IRQ errors",
+ };
+ int i;
+
+ for (i = 0; i < MMC_ERR_MAX; i++) {
+ if (desc[i])
+ seq_printf(file, "# %s:\t %d\n",
+ desc[i], host->err_stats[i]);
+ }
+
+ return 0;
+}
+
+static int mmc_err_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mmc_err_stats_show, inode->i_private);
+}
+
+static ssize_t mmc_err_stats_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct mmc_host *host = filp->f_mapping->host->i_private;
+
+ pr_debug("%s: Resetting MMC error statistics\n", __func__);
+ memset(host->err_stats, 0, sizeof(host->err_stats));
+
+ return cnt;
+}
+
+static const struct file_operations mmc_err_stats_fops = {
+ .open = mmc_err_stats_open,
+ .read = seq_read,
+ .write = mmc_err_stats_write,
+ .release = single_release,
+};
+
+static int mmc_caps_get(void *data, u64 *val)
+{
+ *val = *(u32 *)data;
+ return 0;
+}
+
+static int mmc_caps_set(void *data, u64 val)
+{
+ u32 *caps = data;
+ u32 diff = *caps ^ val;
+ u32 allowed = MMC_CAP_AGGRESSIVE_PM |
+ MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_UHS |
+ MMC_CAP_DDR |
+ MMC_CAP_4_BIT_DATA |
+ MMC_CAP_8_BIT_DATA |
+ MMC_CAP_CMD23;
+
+ if (diff & ~allowed)
+ return -EINVAL;
+
+ *caps = val;
+
+ return 0;
+}
+
+static int mmc_caps2_set(void *data, u64 val)
+{
+ u32 allowed = MMC_CAP2_HSX00_1_8V |
+ MMC_CAP2_HSX00_1_2V |
+ MMC_CAP2_CQE |
+ MMC_CAP2_CQE_DCMD;
+ u32 *caps = data;
+ u32 diff = *caps ^ val;
+
+ if (diff & ~allowed)
+ return -EINVAL;
+
+ *caps = val;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_caps_fops, mmc_caps_get, mmc_caps_set,
+ "0x%08llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_caps2_fops, mmc_caps_get, mmc_caps2_set,
+ "0x%08llx\n");
+
void mmc_add_host_debugfs(struct mmc_host *host)
{
struct dentry *root;
@@ -230,12 +357,18 @@ void mmc_add_host_debugfs(struct mmc_host *host)
root = debugfs_create_dir(mmc_hostname(host), NULL);
host->debugfs_root = root;
- debugfs_create_file("ios", S_IRUSR, root, host, &mmc_ios_fops);
- debugfs_create_x32("caps", S_IRUSR, root, &host->caps);
- debugfs_create_x32("caps2", S_IRUSR, root, &host->caps2);
- debugfs_create_file_unsafe("clock", S_IRUSR | S_IWUSR, root, host,
+ debugfs_create_file("ios", 0400, root, host, &mmc_ios_fops);
+ debugfs_create_file("caps", 0600, root, &host->caps, &mmc_caps_fops);
+ debugfs_create_file("caps2", 0600, root, &host->caps2,
+ &mmc_caps2_fops);
+ debugfs_create_file_unsafe("clock", 0600, root, host,
&mmc_clock_fops);
+ debugfs_create_file_unsafe("err_state", 0600, root, host,
+ &mmc_err_state);
+ debugfs_create_file("err_stats", 0600, root, host,
+ &mmc_err_stats_fops);
+
#ifdef CONFIG_FAIL_MMC_REQUEST
if (fail_request)
setup_fault_attr(&fail_default_attr, fail_request);
@@ -261,7 +394,8 @@ void mmc_add_card_debugfs(struct mmc_card *card)
root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root);
card->debugfs_root = root;
- debugfs_create_x32("state", S_IRUSR, root, &card->state);
+ debugfs_create_x32("state", 0400, root, &card->state);
+ debugfs_create_x32("quirks", 0400, root, &card->quirks);
}
void mmc_remove_card_debugfs(struct mmc_card *card)
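
The new caps/caps2 debugfs writers only let a whitelisted set of capability bits change: the XOR of the current and requested values must fall entirely inside the allowed mask, otherwise the write fails with -EINVAL. A compact sketch of that check (example_caps_change_ok() is not part of the patch):

  static bool example_caps_change_ok(u32 cur, u64 requested, u32 allowed)
  {
          /* Only bits inside "allowed" may differ between cur and requested. */
          return !((cur ^ (u32)requested) & ~allowed);
  }
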
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index eda4a1892c33..88c95dbfd9cf 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -13,9 +13,7 @@
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pagemap.h>
-#include <linux/pm_wakeup.h>
#include <linux/export.h>
#include <linux/leds.h>
#include <linux/slab.h>
@@ -75,13 +73,23 @@ static void mmc_host_classdev_release(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
wakeup_source_unregister(host->ws);
- ida_simple_remove(&mmc_host_ida, host->index);
+ if (of_alias_get_id(host->parent->of_node, "mmc") < 0)
+ ida_free(&mmc_host_ida, host->index);
kfree(host);
}
-static struct class mmc_host_class = {
+static int mmc_host_classdev_shutdown(struct device *dev)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+ __mmc_stop_host(host);
+ return 0;
+}
+
+static const struct class mmc_host_class = {
.name = "mmc_host",
.dev_release = mmc_host_classdev_release,
+ .shutdown_pre = mmc_host_classdev_shutdown,
.pm = MMC_HOST_CLASS_DEV_PM_OPS,
};
@@ -95,6 +103,10 @@ void mmc_unregister_host_class(void)
class_unregister(&mmc_host_class);
}
+/**
+ * mmc_retune_enable() - enter a transfer mode that requires retuning
+ * @host: host which should retune now
+ */
void mmc_retune_enable(struct mmc_host *host)
{
host->can_retune = 1;
@@ -105,13 +117,12 @@ void mmc_retune_enable(struct mmc_host *host)
/*
* Pause re-tuning for a small set of operations. The pause begins after the
- * next command and after first doing re-tuning.
+ * next command.
*/
void mmc_retune_pause(struct mmc_host *host)
{
if (!host->retune_paused) {
host->retune_paused = 1;
- mmc_retune_needed(host);
mmc_retune_hold(host);
}
}
@@ -126,18 +137,23 @@ void mmc_retune_unpause(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_retune_unpause);
+/**
+ * mmc_retune_disable() - exit a transfer mode that requires retuning
+ * @host: host which should not retune anymore
+ *
+ * It is not meant for temporarily preventing retuning!
+ */
void mmc_retune_disable(struct mmc_host *host)
{
mmc_retune_unpause(host);
host->can_retune = 0;
- del_timer_sync(&host->retune_timer);
- host->retune_now = 0;
- host->need_retune = 0;
+ timer_delete_sync(&host->retune_timer);
+ mmc_retune_clear(host);
}
void mmc_retune_timer_stop(struct mmc_host *host)
{
- del_timer_sync(&host->retune_timer);
+ timer_delete_sync(&host->retune_timer);
}
EXPORT_SYMBOL(mmc_retune_timer_stop);
@@ -196,7 +212,7 @@ out:
static void mmc_retune_timer(struct timer_list *t)
{
- struct mmc_host *host = from_timer(host, t, retune_timer);
+ struct mmc_host *host = timer_container_of(host, t, retune_timer);
mmc_retune_needed(host);
}
@@ -216,10 +232,8 @@ static void mmc_of_parse_timing_phase(struct device *dev, const char *prop,
}
void
-mmc_of_parse_clk_phase(struct mmc_host *host, struct mmc_clk_phase_map *map)
+mmc_of_parse_clk_phase(struct device *dev, struct mmc_clk_phase_map *map)
{
- struct device *dev = host->parent;
-
mmc_of_parse_timing_phase(dev, "clk-phase-legacy",
&map->phase[MMC_TIMING_LEGACY]);
mmc_of_parse_timing_phase(dev, "clk-phase-mmc-hs",
@@ -250,7 +264,7 @@ EXPORT_SYMBOL(mmc_of_parse_clk_phase);
* @host: host whose properties should be parsed.
*
* To keep the rest of the MMC subsystem unaware of whether DT has been
- * used to to instantiate and configure this host instance or not, we
+ * used to instantiate and configure this host instance or not, we
* parse the properties and set respective generic mmc-host flags and
* parameters.
*/
@@ -288,6 +302,8 @@ int mmc_of_parse(struct mmc_host *host)
/* f_max is obtained from the optional "max-frequency" property */
device_property_read_u32(dev, "max-frequency", &host->f_max);
+ device_property_read_u32(dev, "max-sd-hs-hz", &host->max_sd_hs_hz);
+
/*
* Configure CD and WP pins. They are both by default active low to
* match the SDHCI spec. If GPIOs are provided for CD and / or WP, the
@@ -502,7 +518,7 @@ static int mmc_first_nonreserved_index(void)
*/
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
- int err;
+ int index;
struct mmc_host *host;
int alias_id, min_idx, max_idx;
@@ -515,20 +531,20 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
alias_id = of_alias_get_id(dev->of_node, "mmc");
if (alias_id >= 0) {
- min_idx = alias_id;
- max_idx = alias_id + 1;
+ index = alias_id;
} else {
min_idx = mmc_first_nonreserved_index();
max_idx = 0;
- }
- err = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL);
- if (err < 0) {
- kfree(host);
- return NULL;
+ index = ida_alloc_range(&mmc_host_ida, min_idx, max_idx - 1,
+ GFP_KERNEL);
+ if (index < 0) {
+ kfree(host);
+ return NULL;
+ }
}
- host->index = err;
+ host->index = index;
dev_set_name(&host->class_dev, "mmc%d", host->index);
host->ws = wakeup_source_register(NULL, dev_name(&host->class_dev));
@@ -547,9 +563,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
- INIT_DELAYED_WORK(&host->sdio_irq_work, sdio_irq_work);
+ INIT_WORK(&host->sdio_irq_work, sdio_irq_work);
timer_setup(&host->retune_timer, mmc_retune_timer, 0);
+ INIT_WORK(&host->supply.uv_work, mmc_undervoltage_workfn);
+
/*
* By default, hosts do not support SGIO or large requests.
* They have to set these according to their abilities.
@@ -570,6 +588,51 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
EXPORT_SYMBOL(mmc_alloc_host);
+static void devm_mmc_host_release(struct device *dev, void *res)
+{
+ mmc_free_host(*(struct mmc_host **)res);
+}
+
+struct mmc_host *devm_mmc_alloc_host(struct device *dev, int extra)
+{
+ struct mmc_host **dr, *host;
+
+ dr = devres_alloc(devm_mmc_host_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return NULL;
+
+ host = mmc_alloc_host(extra, dev);
+ if (!host) {
+ devres_free(dr);
+ return NULL;
+ }
+
+ *dr = host;
+ devres_add(dev, dr);
+
+ return host;
+}
+EXPORT_SYMBOL(devm_mmc_alloc_host);
+
+static int mmc_validate_host_caps(struct mmc_host *host)
+{
+ struct device *dev = host->parent;
+ u32 caps = host->caps, caps2 = host->caps2;
+
+ if (caps & MMC_CAP_SDIO_IRQ && !host->ops->enable_sdio_irq) {
+ dev_warn(dev, "missing ->enable_sdio_irq() ops\n");
+ return -EINVAL;
+ }
+
+ if (caps2 & (MMC_CAP2_HS400_ES | MMC_CAP2_HS400) &&
+ !(caps & MMC_CAP_8_BIT_DATA) && !(caps2 & MMC_CAP2_NO_MMC)) {
+ dev_warn(dev, "drop HS400 support since no 8-bit bus\n");
+ host->caps2 = caps2 & ~MMC_CAP2_HS400_ES & ~MMC_CAP2_HS400;
+ }
+
+ return 0;
+}
+
/**
* mmc_add_host - initialise host hardware
* @host: mmc host
@@ -582,8 +645,9 @@ int mmc_add_host(struct mmc_host *host)
{
int err;
- WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
- !host->ops->enable_sdio_irq);
+ err = mmc_validate_host_caps(host);
+ if (err)
+ return err;
err = device_add(&host->class_dev);
if (err)
@@ -591,9 +655,7 @@ int mmc_add_host(struct mmc_host *host)
led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
-#ifdef CONFIG_DEBUG_FS
mmc_add_host_debugfs(host);
-#endif
mmc_start_host(host);
return 0;
@@ -613,9 +675,7 @@ void mmc_remove_host(struct mmc_host *host)
{
mmc_stop_host(host);
-#ifdef CONFIG_DEBUG_FS
mmc_remove_host_debugfs(host);
-#endif
device_del(&host->class_dev);
@@ -632,6 +692,7 @@ EXPORT_SYMBOL(mmc_remove_host);
*/
void mmc_free_host(struct mmc_host *host)
{
+ cancel_delayed_work_sync(&host->detect);
mmc_pwrseq_free(host);
put_device(&host->class_dev);
}
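With the devres variant added above, a host driver no longer needs an explicit mmc_free_host() on its error paths. A sketch of how a probe routine might use it, assuming a hypothetical foo_mmc_host private structure:

	static int foo_mmc_probe(struct platform_device *pdev)
	{
		struct mmc_host *mmc;
		struct foo_mmc_host *priv;
		int ret;

		/* Allocation is tied to the device; devres calls mmc_free_host(). */
		mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*priv));
		if (!mmc)
			return -ENOMEM;
		priv = mmc_priv(mmc);

		ret = mmc_of_parse(mmc);
		if (ret)
			return ret;	/* host is released by devres */

		return mmc_add_host(mmc);
	}

Note that the driver's remove path still has to call mmc_remove_host(); the devres action only replaces the final mmc_free_host().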
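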
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index ba407617ed23..5941d68ff989 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -21,6 +21,12 @@ int mmc_retune(struct mmc_host *host);
void mmc_retune_pause(struct mmc_host *host);
void mmc_retune_unpause(struct mmc_host *host);
+static inline void mmc_retune_clear(struct mmc_host *host)
+{
+ host->retune_now = 0;
+ host->need_retune = 0;
+}
+
static inline void mmc_retune_hold_now(struct mmc_host *host)
{
host->retune_now = 0;
@@ -33,22 +39,22 @@ static inline void mmc_retune_recheck(struct mmc_host *host)
host->retune_now = 1;
}
-static inline int mmc_host_cmd23(struct mmc_host *host)
+static inline int mmc_host_can_cmd23(struct mmc_host *host)
{
return host->caps & MMC_CAP_CMD23;
}
-static inline bool mmc_host_done_complete(struct mmc_host *host)
+static inline bool mmc_host_can_done_complete(struct mmc_host *host)
{
return host->caps & MMC_CAP_DONE_COMPLETE;
}
-static inline int mmc_boot_partition_access(struct mmc_host *host)
+static inline int mmc_host_can_access_boot(struct mmc_host *host)
{
return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
}
-static inline int mmc_host_uhs(struct mmc_host *host)
+static inline int mmc_host_can_uhs(struct mmc_host *host)
{
return host->caps &
(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 838726b68ff3..7c86efb1044a 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -11,7 +11,10 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/string.h>
#include <linux/pm_runtime.h>
+#include <linux/random.h>
+#include <linux/sysfs.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -30,6 +33,13 @@
#define MIN_CACHE_EN_TIMEOUT_MS 1600
#define CACHE_FLUSH_TIMEOUT_MS 30000 /* 30s */
+enum mmc_poweroff_type {
+ MMC_POWEROFF_SUSPEND,
+ MMC_POWEROFF_SHUTDOWN,
+ MMC_POWEROFF_UNDERVOLTAGE,
+ MMC_POWEROFF_UNBIND,
+};
+
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
0, 0, 0, 0
@@ -49,20 +59,6 @@ static const unsigned int taac_mant[] = {
35, 40, 45, 50, 55, 60, 70, 80,
};
-#define UNSTUFF_BITS(resp,start,size) \
- ({ \
- const int __size = size; \
- const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
- const int __off = 3 - ((start) / 32); \
- const int __shft = (start) & 31; \
- u32 __res; \
- \
- __res = resp[__off] >> __shft; \
- if (__size + __shft > 32) \
- __res |= resp[__off-1] << ((32 - __shft) % 32); \
- __res & __mask; \
- })
-
/*
* Given the decoded CSD structure, decode the raw CID to our CID structure.
*/
@@ -71,42 +67,48 @@ static int mmc_decode_cid(struct mmc_card *card)
u32 *resp = card->raw_cid;
/*
+	 * Add the raw card ID (cid) data to the entropy pool. It doesn't
+	 * matter that not all of it is unique; it's just bonus entropy.
+ */
+ add_device_randomness(&card->raw_cid, sizeof(card->raw_cid));
+
+ /*
* The selection of the format here is based upon published
- * specs from sandisk and from what people have reported.
+ * specs from SanDisk and from what people have reported.
*/
switch (card->csd.mmca_vsn) {
case 0: /* MMC v1.0 - v1.2 */
case 1: /* MMC v1.4 */
- card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
- card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
- card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
- card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
- card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
- card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
- card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
- card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
- card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
- card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
- card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
- card->cid.month = UNSTUFF_BITS(resp, 12, 4);
- card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
+ card->cid.manfid = unstuff_bits(resp, 104, 24);
+ card->cid.prod_name[0] = unstuff_bits(resp, 96, 8);
+ card->cid.prod_name[1] = unstuff_bits(resp, 88, 8);
+ card->cid.prod_name[2] = unstuff_bits(resp, 80, 8);
+ card->cid.prod_name[3] = unstuff_bits(resp, 72, 8);
+ card->cid.prod_name[4] = unstuff_bits(resp, 64, 8);
+ card->cid.prod_name[5] = unstuff_bits(resp, 56, 8);
+ card->cid.prod_name[6] = unstuff_bits(resp, 48, 8);
+ card->cid.hwrev = unstuff_bits(resp, 44, 4);
+ card->cid.fwrev = unstuff_bits(resp, 40, 4);
+ card->cid.serial = unstuff_bits(resp, 16, 24);
+ card->cid.month = unstuff_bits(resp, 12, 4);
+ card->cid.year = unstuff_bits(resp, 8, 4) + 1997;
break;
case 2: /* MMC v2.0 - v2.2 */
case 3: /* MMC v3.1 - v3.3 */
case 4: /* MMC v4 */
- card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
- card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
- card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
- card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
- card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
- card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
- card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
- card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
- card->cid.prv = UNSTUFF_BITS(resp, 48, 8);
- card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
- card->cid.month = UNSTUFF_BITS(resp, 12, 4);
- card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
+ card->cid.manfid = unstuff_bits(resp, 120, 8);
+ card->cid.oemid = unstuff_bits(resp, 104, 16);
+ card->cid.prod_name[0] = unstuff_bits(resp, 96, 8);
+ card->cid.prod_name[1] = unstuff_bits(resp, 88, 8);
+ card->cid.prod_name[2] = unstuff_bits(resp, 80, 8);
+ card->cid.prod_name[3] = unstuff_bits(resp, 72, 8);
+ card->cid.prod_name[4] = unstuff_bits(resp, 64, 8);
+ card->cid.prod_name[5] = unstuff_bits(resp, 56, 8);
+ card->cid.prv = unstuff_bits(resp, 48, 8);
+ card->cid.serial = unstuff_bits(resp, 16, 32);
+ card->cid.month = unstuff_bits(resp, 12, 4);
+ card->cid.year = unstuff_bits(resp, 8, 4) + 1997;
break;
default:
@@ -115,6 +117,9 @@ static int mmc_decode_cid(struct mmc_card *card)
return -EINVAL;
}
+ /* some product names include trailing whitespace */
+ strim(card->cid.prod_name);
+
return 0;
}
@@ -128,6 +133,17 @@ static void mmc_set_erase_size(struct mmc_card *card)
mmc_init_erase(card);
}
+
+static void mmc_set_wp_grp_size(struct mmc_card *card)
+{
+ if (card->ext_csd.erase_group_def & 1)
+ card->wp_grp_size = card->ext_csd.hc_erase_size *
+ card->ext_csd.raw_hc_erase_gap_size;
+ else
+ card->wp_grp_size = card->csd.erase_size *
+ (card->csd.wp_grp_size + 1);
+}
+
/*
* Given a 128-bit response, decode to our card CSD structure.
*/
@@ -142,42 +158,43 @@ static int mmc_decode_csd(struct mmc_card *card)
* v1.2 has extra information in bits 15, 11 and 10.
* We also support eMMC v4.4 & v4.41.
*/
- csd->structure = UNSTUFF_BITS(resp, 126, 2);
+ csd->structure = unstuff_bits(resp, 126, 2);
if (csd->structure == 0) {
pr_err("%s: unrecognised CSD structure version %d\n",
mmc_hostname(card->host), csd->structure);
return -EINVAL;
}
- csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
- m = UNSTUFF_BITS(resp, 115, 4);
- e = UNSTUFF_BITS(resp, 112, 3);
+ csd->mmca_vsn = unstuff_bits(resp, 122, 4);
+ m = unstuff_bits(resp, 115, 4);
+ e = unstuff_bits(resp, 112, 3);
csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10;
- csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
+ csd->taac_clks = unstuff_bits(resp, 104, 8) * 100;
- m = UNSTUFF_BITS(resp, 99, 4);
- e = UNSTUFF_BITS(resp, 96, 3);
+ m = unstuff_bits(resp, 99, 4);
+ e = unstuff_bits(resp, 96, 3);
csd->max_dtr = tran_exp[e] * tran_mant[m];
- csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
+ csd->cmdclass = unstuff_bits(resp, 84, 12);
- e = UNSTUFF_BITS(resp, 47, 3);
- m = UNSTUFF_BITS(resp, 62, 12);
+ e = unstuff_bits(resp, 47, 3);
+ m = unstuff_bits(resp, 62, 12);
csd->capacity = (1 + m) << (e + 2);
- csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
- csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
- csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
- csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
- csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
- csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
- csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
- csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
+ csd->read_blkbits = unstuff_bits(resp, 80, 4);
+ csd->read_partial = unstuff_bits(resp, 79, 1);
+ csd->write_misalign = unstuff_bits(resp, 78, 1);
+ csd->read_misalign = unstuff_bits(resp, 77, 1);
+ csd->dsr_imp = unstuff_bits(resp, 76, 1);
+ csd->r2w_factor = unstuff_bits(resp, 26, 3);
+ csd->write_blkbits = unstuff_bits(resp, 22, 4);
+ csd->write_partial = unstuff_bits(resp, 21, 1);
if (csd->write_blkbits >= 9) {
- a = UNSTUFF_BITS(resp, 42, 5);
- b = UNSTUFF_BITS(resp, 37, 5);
+ a = unstuff_bits(resp, 42, 5);
+ b = unstuff_bits(resp, 37, 5);
csd->erase_size = (a + 1) * (b + 1);
csd->erase_size <<= csd->write_blkbits - 9;
+ csd->wp_grp_size = unstuff_bits(resp, 32, 5);
}
return 0;
@@ -411,13 +428,14 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
- mmc_select_card_type(card);
card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
card->ext_csd.raw_erase_timeout_mult =
ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
card->ext_csd.raw_hc_erase_grp_size =
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+ card->ext_csd.raw_boot_mult =
+ ext_csd[EXT_CSD_BOOT_MULT];
if (card->ext_csd.rev >= 3) {
u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
@@ -442,7 +460,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
* There are two boot regions of equal size, defined in
* multiples of 128K.
*/
- if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
+ if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_host_can_access_boot(card->host)) {
for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
mmc_part_add(card, part_size,
@@ -561,7 +579,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
* RPMB regions are defined in multiples of 128K.
*/
card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
- if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
+ if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_can_cmd23(card->host)) {
mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
EXT_CSD_PART_CONFIG_ACC_RPMB,
"rpmb", 0, false,
@@ -604,11 +622,6 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
} else {
card->ext_csd.data_tag_unit_size = 0;
}
-
- card->ext_csd.max_packed_writes =
- ext_csd[EXT_CSD_MAX_PACKED_WRITES];
- card->ext_csd.max_packed_reads =
- ext_csd[EXT_CSD_MAX_PACKED_READS];
} else {
card->ext_csd.data_sector_size = 512;
}
@@ -668,7 +681,7 @@ static int mmc_read_ext_csd(struct mmc_card *card)
u8 *ext_csd;
int err;
- if (!mmc_can_ext_csd(card))
+ if (!mmc_card_can_ext_csd(card))
return 0;
err = mmc_get_ext_csd(card, &ext_csd);
@@ -781,6 +794,7 @@ MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
+MMC_DEV_ATTR(wp_grp_size, "%u\n", card->wp_grp_size << 9);
MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
@@ -810,15 +824,14 @@ static ssize_t mmc_fwrev_show(struct device *dev,
{
struct mmc_card *card = mmc_dev_to_card(dev);
- if (card->ext_csd.rev < 7) {
- return sprintf(buf, "0x%x\n", card->cid.fwrev);
- } else {
- return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
- card->ext_csd.fwrev);
- }
+ if (card->ext_csd.rev < 7)
+ return sysfs_emit(buf, "0x%x\n", card->cid.fwrev);
+ else
+ return sysfs_emit(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
+ card->ext_csd.fwrev);
}
-static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
+static DEVICE_ATTR(fwrev, 0444, mmc_fwrev_show, NULL);
static ssize_t mmc_dsr_show(struct device *dev,
struct device_attribute *attr,
@@ -828,13 +841,13 @@ static ssize_t mmc_dsr_show(struct device *dev,
struct mmc_host *host = card->host;
if (card->csd.dsr_imp && host->dsr_req)
- return sprintf(buf, "0x%x\n", host->dsr);
+ return sysfs_emit(buf, "0x%x\n", host->dsr);
else
/* return default DSR value */
- return sprintf(buf, "0x%x\n", 0x404);
+ return sysfs_emit(buf, "0x%x\n", 0x404);
}
-static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
+static DEVICE_ATTR(dsr, 0444, mmc_dsr_show, NULL);
static struct attribute *mmc_std_attrs[] = {
&dev_attr_cid.attr,
@@ -842,6 +855,7 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_date.attr,
&dev_attr_erase_size.attr,
&dev_attr_preferred_erase_size.attr,
+ &dev_attr_wp_grp_size.attr,
&dev_attr_fwrev.attr,
&dev_attr_ffu_capable.attr,
&dev_attr_hwrev.attr,
@@ -866,7 +880,7 @@ static struct attribute *mmc_std_attrs[] = {
};
ATTRIBUTE_GROUPS(mmc_std);
-static struct device_type mmc_type = {
+static const struct device_type mmc_type = {
.groups = mmc_std_groups,
};
@@ -946,7 +960,7 @@ static int mmc_select_powerclass(struct mmc_card *card)
int err, ddr;
/* Power class selection is supported for versions >= 4.0 */
- if (!mmc_can_ext_csd(card))
+ if (!mmc_card_can_ext_csd(card))
return 0;
bus_width = host->ios.bus_width;
@@ -998,16 +1012,18 @@ static int mmc_select_bus_width(struct mmc_card *card)
static unsigned ext_csd_bits[] = {
EXT_CSD_BUS_WIDTH_8,
EXT_CSD_BUS_WIDTH_4,
+ EXT_CSD_BUS_WIDTH_1,
};
static unsigned bus_widths[] = {
MMC_BUS_WIDTH_8,
MMC_BUS_WIDTH_4,
+ MMC_BUS_WIDTH_1,
};
struct mmc_host *host = card->host;
unsigned idx, bus_width = 0;
int err = 0;
- if (!mmc_can_ext_csd(card) ||
+ if (!mmc_card_can_ext_csd(card) ||
!(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
return 0;
@@ -1222,6 +1238,14 @@ static int mmc_select_hs400(struct mmc_card *card)
mmc_set_timing(host, MMC_TIMING_MMC_HS400);
mmc_set_bus_speed(card);
+ if (host->ops->execute_hs400_tuning) {
+ mmc_retune_disable(host);
+ err = host->ops->execute_hs400_tuning(host, card);
+ mmc_retune_enable(host);
+ if (err)
+ goto out_err;
+ }
+
if (host->ops->hs400_complete)
host->ops->hs400_complete(host);
@@ -1345,11 +1369,6 @@ static int mmc_select_hs400es(struct mmc_card *card)
int err = -EINVAL;
u8 val;
- if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
- err = -ENOTSUPP;
- goto out_err;
- }
-
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
@@ -1379,13 +1398,17 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
}
+ /*
+ * Bump to HS timing and frequency. Some cards don't handle
+ * SEND_STATUS reliably at the initial frequency.
+ */
mmc_set_timing(host, MMC_TIMING_MMC_HS);
+ mmc_set_bus_speed(card);
+
err = mmc_switch_status(card, true);
if (err)
goto out_err;
- mmc_set_clock(host, card->ext_csd.hs_max_dtr);
-
/* Switch card to DDR with strobe bit */
val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1443,7 +1466,7 @@ out_err:
static int mmc_select_hs200(struct mmc_card *card)
{
struct mmc_host *host = card->host;
- unsigned int old_timing, old_signal_voltage;
+ unsigned int old_timing, old_signal_voltage, old_clock;
int err = -EINVAL;
u8 val;
@@ -1474,8 +1497,17 @@ static int mmc_select_hs200(struct mmc_card *card)
false, true, MMC_CMD_RETRIES);
if (err)
goto err;
+
+ /*
+ * Bump to HS timing and frequency. Some cards don't handle
+ * SEND_STATUS reliably at the initial frequency.
+ * NB: We can't move to full (HS200) speeds until after we've
+ * successfully switched over.
+ */
old_timing = host->ios.timing;
+ old_clock = host->ios.clock;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+ mmc_set_clock(card->host, card->ext_csd.hs_max_dtr);
/*
* For HS200, CRC errors are not a reliable way to know the
@@ -1488,8 +1520,10 @@ static int mmc_select_hs200(struct mmc_card *card)
* mmc_select_timing() assumes timing has not changed if
* it is a switch error.
*/
- if (err == -EBADMSG)
+ if (err == -EBADMSG) {
+ mmc_set_clock(host, old_clock);
mmc_set_timing(host, old_timing);
+ }
}
err:
if (err) {
@@ -1510,16 +1544,26 @@ static int mmc_select_timing(struct mmc_card *card)
{
int err = 0;
- if (!mmc_can_ext_csd(card))
+ if (!mmc_card_can_ext_csd(card))
goto bus_speed;
- if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES)
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES) {
err = mmc_select_hs400es(card);
- else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
+ goto out;
+ }
+
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200) {
err = mmc_select_hs200(card);
- else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
+ if (err == -EBADMSG)
+ card->mmc_avail_type &= ~EXT_CSD_CARD_TYPE_HS200;
+ else
+ goto out;
+ }
+
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
err = mmc_select_hs(card);
+out:
if (err && err != -EBADMSG)
return err;
@@ -1695,6 +1739,12 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
mmc_set_erase_size(card);
}
+ /*
+	 * Reselect the card type, since the host caps could have been changed
+	 * (e.g. while debugging) even if the card is not new.
+ */
+ mmc_select_card_type(card);
+
/* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */
if (card->ext_csd.rev >= 3) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1722,7 +1772,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
mmc_set_erase_size(card);
}
}
-
+ mmc_set_wp_grp_size(card);
/*
* Ensure eMMC user default partition is enabled
*/
@@ -1755,9 +1805,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/* set erase_arg */
- if (mmc_can_discard(card))
+ if (mmc_card_can_discard(card))
card->erase_arg = MMC_DISCARD_ARG;
- else if (mmc_can_trim(card))
+ else if (mmc_card_can_trim(card))
card->erase_arg = MMC_TRIM_ARG;
else
card->erase_arg = MMC_ERASE_ARG;
@@ -1780,8 +1830,13 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
if (err)
goto free_card;
-
- } else if (!mmc_card_hs400es(card)) {
+ } else if (mmc_card_hs400es(card)) {
+ if (host->ops->execute_hs400_tuning) {
+ err = host->ops->execute_hs400_tuning(host, card);
+ if (err)
+ goto free_card;
+ }
+ } else {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
if (err > 0 && mmc_card_hs(card)) {
@@ -1901,7 +1956,7 @@ err:
return err;
}
-static int mmc_can_sleep(struct mmc_card *card)
+static bool mmc_card_can_sleep(struct mmc_card *card)
{
return card->ext_csd.rev >= 3;
}
@@ -1952,20 +2007,33 @@ static int mmc_sleep(struct mmc_host *host)
goto out_release;
}
- err = __mmc_poll_for_busy(card, timeout_ms, &mmc_sleep_busy_cb, host);
+ err = __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_sleep_busy_cb, host);
out_release:
mmc_retune_release(host);
return err;
}
-static int mmc_can_poweroff_notify(const struct mmc_card *card)
+static bool mmc_card_can_poweroff_notify(const struct mmc_card *card)
{
return card &&
mmc_card_mmc(card) &&
(card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
}
+static bool mmc_host_can_poweroff_notify(const struct mmc_host *host,
+ enum mmc_poweroff_type pm_type)
+{
+ if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE)
+ return true;
+
+ if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND &&
+ pm_type == MMC_POWEROFF_SUSPEND)
+ return true;
+
+ return pm_type == MMC_POWEROFF_SHUTDOWN;
+}
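Spelled out, the helper above sends the power-off notification in these cases (a summary of the code, not additional behaviour):

	/*
	 * MMC_CAP2_FULL_PWR_CYCLE            -> notify for every power-off type
	 * MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND -> additionally notify for suspend
	 * neither cap                        -> notify only for shutdown
	 */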
+
static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
{
unsigned int timeout = card->ext_csd.generic_cmd6_time;
@@ -1989,15 +2057,6 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
}
/*
- * Host is being removed. Free up the current card.
- */
-static void mmc_remove(struct mmc_host *host)
-{
- mmc_remove_card(host->card);
- host->card = NULL;
-}
-
-/*
* Card detection - card is alive.
*/
static int mmc_alive(struct mmc_host *host)
@@ -2022,7 +2081,8 @@ static void mmc_detect(struct mmc_host *host)
mmc_put_card(host->card, NULL);
if (err) {
- mmc_remove(host);
+ mmc_remove_card(host->card);
+ host->card = NULL;
mmc_claim_host(host);
mmc_detach_bus(host);
@@ -2044,38 +2104,49 @@ static int _mmc_flush_cache(struct mmc_host *host)
{
int err = 0;
+ if (mmc_card_broken_cache_flush(host->card) && !host->card->written_flag)
+ return 0;
+
if (_mmc_cache_enabled(host)) {
err = mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_FLUSH_CACHE, 1,
CACHE_FLUSH_TIMEOUT_MS);
if (err)
- pr_err("%s: cache flush error %d\n",
- mmc_hostname(host), err);
+ pr_err("%s: cache flush error %d\n", mmc_hostname(host), err);
+ else
+ host->card->written_flag = false;
}
return err;
}
-static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
+static int _mmc_suspend(struct mmc_host *host, enum mmc_poweroff_type pm_type)
{
+ unsigned int notify_type = EXT_CSD_POWER_OFF_SHORT;
int err = 0;
- unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
- EXT_CSD_POWER_OFF_LONG;
+
+ if (pm_type == MMC_POWEROFF_SHUTDOWN)
+ notify_type = EXT_CSD_POWER_OFF_LONG;
mmc_claim_host(host);
if (mmc_card_suspended(host->card))
goto out;
- err = _mmc_flush_cache(host);
- if (err)
- goto out;
+ /*
+ * For the undervoltage case, we care more about device integrity.
+ * Avoid cache flush and notify the device to power off quickly.
+ */
+ if (pm_type != MMC_POWEROFF_UNDERVOLTAGE) {
+ err = _mmc_flush_cache(host);
+ if (err)
+ goto out;
+ }
- if (mmc_can_poweroff_notify(host->card) &&
- ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend ||
- (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND)))
+ if (mmc_card_can_poweroff_notify(host->card) &&
+ mmc_host_can_poweroff_notify(host, pm_type))
err = mmc_poweroff_notify(host->card, notify_type);
- else if (mmc_can_sleep(host->card))
+ else if (mmc_card_can_sleep(host->card))
err = mmc_sleep(host);
else if (!mmc_host_is_spi(host))
err = mmc_deselect_cards(host);
@@ -2090,13 +2161,27 @@ out:
}
/*
+ * Host is being removed. Free up the current card and do a graceful power-off.
+ */
+static void mmc_remove(struct mmc_host *host)
+{
+ get_device(&host->card->dev);
+ mmc_remove_card(host->card);
+
+ _mmc_suspend(host, MMC_POWEROFF_UNBIND);
+
+ put_device(&host->card->dev);
+ host->card = NULL;
+}
+
+/*
* Suspend callback
*/
static int mmc_suspend(struct mmc_host *host)
{
int err;
- err = _mmc_suspend(host, true);
+ err = _mmc_suspend(host, MMC_POWEROFF_SUSPEND);
if (!err) {
pm_runtime_disable(&host->card->dev);
pm_runtime_set_suspended(&host->card->dev);
@@ -2135,15 +2220,23 @@ static int mmc_shutdown(struct mmc_host *host)
int err = 0;
/*
- * In a specific case for poweroff notify, we need to resume the card
- * before we can shutdown it properly.
+ * In case of undervoltage, the card will be powered off (removed) by
+ * _mmc_handle_undervoltage()
+ */
+ if (mmc_card_removed(host->card))
+ return 0;
+
+ /*
+ * If the card remains suspended at this point and it was done by using
+ * the sleep-cmd (CMD5), we may need to re-initialize it first, to allow
+ * us to send the preferred poweroff-notification cmd at shutdown.
*/
- if (mmc_can_poweroff_notify(host->card) &&
- !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
+ if (mmc_card_can_poweroff_notify(host->card) &&
+ !mmc_host_can_poweroff_notify(host, MMC_POWEROFF_SUSPEND))
err = _mmc_resume(host);
if (!err)
- err = _mmc_suspend(host, false);
+ err = _mmc_suspend(host, MMC_POWEROFF_SHUTDOWN);
return err;
}
@@ -2167,7 +2260,7 @@ static int mmc_runtime_suspend(struct mmc_host *host)
if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
return 0;
- err = _mmc_suspend(host, true);
+ err = _mmc_suspend(host, MMC_POWEROFF_SUSPEND);
if (err)
pr_err("%s: error %d doing aggressive suspend\n",
mmc_hostname(host), err);
@@ -2190,14 +2283,12 @@ static int mmc_runtime_resume(struct mmc_host *host)
return 0;
}
-static int mmc_can_reset(struct mmc_card *card)
+static bool mmc_card_can_reset(struct mmc_card *card)
{
u8 rst_n_function;
rst_n_function = card->ext_csd.rst_n_function;
- if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
- return 0;
- return 1;
+ return ((rst_n_function & EXT_CSD_RST_N_EN_MASK) == EXT_CSD_RST_N_ENABLED);
}
static int _mmc_hw_reset(struct mmc_host *host)
@@ -2210,11 +2301,11 @@ static int _mmc_hw_reset(struct mmc_host *host)
*/
_mmc_flush_cache(host);
- if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
- mmc_can_reset(card)) {
+ if ((host->caps & MMC_CAP_HW_RESET) && host->ops->card_hw_reset &&
+ mmc_card_can_reset(card)) {
/* If the card accept RST_n signal, send it. */
mmc_set_clock(host, host->f_init);
- host->ops->hw_reset(host);
+ host->ops->card_hw_reset(host);
/* Set initial state and call mmc_set_ios */
mmc_set_initial_state(host);
} else {
@@ -2225,6 +2316,55 @@ static int _mmc_hw_reset(struct mmc_host *host)
return mmc_init_card(host, card->ocr, card);
}
+/**
+ * _mmc_handle_undervoltage - Handle an undervoltage event for MMC/eMMC devices
+ * @host: MMC host structure
+ *
+ * This function is triggered when an undervoltage condition is detected.
+ * It attempts to transition the device into a low-power or safe state to
+ * prevent data corruption.
+ *
+ * Steps performed:
+ * - Perform an emergency suspend using EXT_CSD_POWER_OFF_SHORT if possible.
+ * - If power-off notify is not supported, fallback mechanisms like sleep or
+ * deselecting the card are attempted.
+ * - Cache flushing is skipped to reduce execution time.
+ * - Mark the card as removed to prevent further interactions after
+ * undervoltage.
+ *
+ * Note: This function does not handle host claiming or releasing. The caller
+ * must ensure that the host is properly claimed before calling this
+ * function and released afterward.
+ *
+ * Returns: 0 on success, or a negative error code if any step fails.
+ */
+static int _mmc_handle_undervoltage(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+ int err;
+
+ /*
+ * Perform an emergency suspend to power off the eMMC quickly.
+ * This ensures the device enters a safe state before power is lost.
+ * We first attempt EXT_CSD_POWER_OFF_SHORT, but if power-off notify
+ * is not supported, we fall back to sleep mode or deselecting the card.
+ * Cache flushing is skipped to minimize delay.
+ */
+ err = _mmc_suspend(host, MMC_POWEROFF_UNDERVOLTAGE);
+ if (err)
+ pr_err("%s: undervoltage suspend failed: %pe\n",
+ mmc_hostname(host), ERR_PTR(err));
+
+ /*
+ * Mark the card as removed to prevent further operations.
+ * This ensures the system does not attempt to access the device
+ * after an undervoltage event, avoiding potential corruption.
+ */
+ mmc_card_set_removed(card);
+
+ return err;
+}
+
static const struct mmc_bus_ops mmc_ops = {
.remove = mmc_remove,
.detect = mmc_detect,
@@ -2237,6 +2377,7 @@ static const struct mmc_bus_ops mmc_ops = {
.hw_reset = _mmc_hw_reset,
.cache_enabled = _mmc_cache_enabled,
.flush_cache = _mmc_flush_cache,
+ .handle_undervoltage = _mmc_handle_undervoltage,
};
/*
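To make the units of the new write-protect group size concrete: mmc_set_wp_grp_size() stores the size in 512-byte sectors, and the wp_grp_size sysfs attribute shifts it left by 9 to report bytes. A worked example with illustrative EXT_CSD values (assuming hc_erase_size is kept in sectors by mmc_decode_ext_csd(), which is not shown in this hunk):

	/*
	 * Illustrative values only:
	 *   EXT_CSD_HC_ERASE_GRP_SIZE = 1  -> hc_erase_size = 1 << 10 = 1024 sectors (512 KiB)
	 *   EXT_CSD_HC_WP_GRP_SIZE    = 16 -> raw_hc_erase_gap_size = 16
	 *
	 * With ERASE_GROUP_DEF enabled:
	 *   wp_grp_size       = 1024 * 16 = 16384 sectors
	 *   sysfs wp_grp_size = 16384 << 9 = 8388608 bytes (8 MiB)
	 */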
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 973756ed4016..a952cc8265af 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -21,6 +21,8 @@
#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */
+#define MMC_OP_COND_PERIOD_US (4 * 1000) /* 4ms */
+#define MMC_OP_COND_TIMEOUT_MS 1000 /* 1s */
static const u8 tuning_blk_pattern_4bit[] = {
0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
@@ -58,6 +60,12 @@ struct mmc_busy_data {
enum mmc_busy_cmd busy_cmd;
};
+struct mmc_op_cond_busy_data {
+ struct mmc_host *host;
+ u32 ocr;
+ struct mmc_command *cmd;
+};
+
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
int err;
@@ -136,10 +144,24 @@ int mmc_set_dsr(struct mmc_host *host)
return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
+int __mmc_go_idle(struct mmc_host *host)
+{
+ struct mmc_command cmd = {};
+ int err;
+
+ cmd.opcode = MMC_GO_IDLE_STATE;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ mmc_delay(1);
+
+ return err;
+}
+
int mmc_go_idle(struct mmc_host *host)
{
int err;
- struct mmc_command cmd = {};
/*
* Non-SPI hosts need to prevent chipselect going active during
@@ -155,13 +177,7 @@ int mmc_go_idle(struct mmc_host *host)
mmc_delay(1);
}
- cmd.opcode = MMC_GO_IDLE_STATE;
- cmd.arg = 0;
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
-
- err = mmc_wait_for_cmd(host, &cmd, 0);
-
- mmc_delay(1);
+ err = __mmc_go_idle(host);
if (!mmc_host_is_spi(host)) {
mmc_set_chip_select(host, MMC_CS_DONTCARE);
@@ -173,43 +189,64 @@ int mmc_go_idle(struct mmc_host *host)
return err;
}
+static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
+{
+ struct mmc_op_cond_busy_data *data = cb_data;
+ struct mmc_host *host = data->host;
+ struct mmc_command *cmd = data->cmd;
+ u32 ocr = data->ocr;
+ int err = 0;
+
+ err = mmc_wait_for_cmd(host, cmd, 0);
+ if (err)
+ return err;
+
+ if (mmc_host_is_spi(host)) {
+ if (!(cmd->resp[0] & R1_SPI_IDLE)) {
+ *busy = false;
+ return 0;
+ }
+ } else {
+ if (cmd->resp[0] & MMC_CARD_BUSY) {
+ *busy = false;
+ return 0;
+ }
+ }
+
+ *busy = true;
+
+ /*
+ * According to eMMC specification v5.1 section 6.4.3, we
+ * should issue CMD1 repeatedly in the idle state until
+ * the eMMC is ready. Otherwise some eMMC devices seem to enter
+ * the inactive mode after mmc_init_card() issued CMD0 when
+ * the eMMC device is busy.
+ */
+ if (!ocr && !mmc_host_is_spi(host))
+ cmd->arg = cmd->resp[0] | BIT(30);
+
+ return 0;
+}
+
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
struct mmc_command cmd = {};
- int i, err = 0;
+ int err = 0;
+ struct mmc_op_cond_busy_data cb_data = {
+ .host = host,
+ .ocr = ocr,
+ .cmd = &cmd
+ };
cmd.opcode = MMC_SEND_OP_COND;
cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
- for (i = 100; i; i--) {
- err = mmc_wait_for_cmd(host, &cmd, 0);
- if (err)
- break;
-
- /* wait until reset completes */
- if (mmc_host_is_spi(host)) {
- if (!(cmd.resp[0] & R1_SPI_IDLE))
- break;
- } else {
- if (cmd.resp[0] & MMC_CARD_BUSY)
- break;
- }
-
- err = -ETIMEDOUT;
-
- mmc_delay(10);
-
- /*
- * According to eMMC specification v5.1 section 6.4.3, we
- * should issue CMD1 repeatedly in the idle state until
- * the eMMC is ready. Otherwise some eMMC devices seem to enter
- * the inactive mode after mmc_init_card() issued CMD0 when
- * the eMMC device is busy.
- */
- if (!ocr && !mmc_host_is_spi(host))
- cmd.arg = cmd.resp[0] | BIT(30);
- }
+ err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
+ MMC_OP_COND_TIMEOUT_MS,
+ &__mmc_send_op_cond_cb, &cb_data);
+ if (err)
+ return err;
if (rocr && !mmc_host_is_spi(host))
*rocr = cmd.resp[0];
@@ -346,7 +383,7 @@ int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
if (!card || !new_ext_csd)
return -EINVAL;
- if (!mmc_can_ext_csd(card))
+ if (!mmc_card_can_ext_csd(card))
return -EOPNOTSUPP;
/*
@@ -435,7 +472,7 @@ static int mmc_busy_cb(void *cb_data, bool *busy)
u32 status = 0;
int err;
- if (host->ops->card_busy) {
+ if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
*busy = host->ops->card_busy(host);
return 0;
}
@@ -457,6 +494,7 @@ static int mmc_busy_cb(void *cb_data, bool *busy)
break;
case MMC_BUSY_HPI:
case MMC_BUSY_EXTR_SINGLE:
+ case MMC_BUSY_IO:
break;
default:
err = -EINVAL;
@@ -469,14 +507,14 @@ static int mmc_busy_cb(void *cb_data, bool *busy)
return 0;
}
-int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
+ unsigned int timeout_ms,
int (*busy_cb)(void *cb_data, bool *busy),
void *cb_data)
{
- struct mmc_host *host = card->host;
int err;
unsigned long timeout;
- unsigned int udelay = 32, udelay_max = 32768;
+ unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
bool expired = false;
bool busy = false;
@@ -509,18 +547,21 @@ int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
return 0;
}
+EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
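Now that __mmc_poll_for_busy() is exported and takes an explicit host plus an initial polling period, callers can plug in their own completion check, following the same pattern as __mmc_send_op_cond_cb() above. A minimal sketch with a hypothetical callback:

	static int foo_card_busy_cb(void *cb_data, bool *busy)
	{
		struct mmc_card *card = cb_data;
		u32 status;
		int err;

		err = __mmc_send_status(card, &status, 0);
		if (err)
			return err;

		*busy = !(status & R1_READY_FOR_DATA);
		return 0;
	}

	/* In the caller: poll every 1 ms, time out after 500 ms. */
	err = __mmc_poll_for_busy(card->host, 1000, 500, foo_card_busy_cb, card);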
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
{
+ struct mmc_host *host = card->host;
struct mmc_busy_data cb_data;
cb_data.card = card;
cb_data.retry_crc_err = retry_crc_err;
cb_data.busy_cmd = busy_cmd;
- return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data);
+ return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
}
+EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
unsigned int timeout_ms)
@@ -542,6 +583,7 @@ bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
cmd->busy_timeout = timeout_ms;
return true;
}
+EXPORT_SYMBOL_GPL(mmc_prepare_busy_cmd);
/**
* __mmc_switch - modify EXT_CSD register
@@ -902,7 +944,7 @@ out:
return err;
}
-int mmc_can_ext_csd(struct mmc_card *card)
+bool mmc_card_can_ext_csd(struct mmc_card *card)
{
return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}
@@ -956,8 +998,15 @@ void mmc_run_bkops(struct mmc_card *card)
*/
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
- if (err)
- pr_warn("%s: Error %d starting bkops\n",
+ /*
+ * If the BKOPS timed out, the card is probably still busy in the
+	 * R1_STATE_PRG. Rather than continuing to wait, let's try to abort
+	 * it with an HPI command to get back into R1_STATE_TRAN.
+ */
+ if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
+ pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
+ else if (err)
+ pr_warn("%s: Error %d running bkops\n",
mmc_hostname(card->host), err);
mmc_retune_release(card->host);
@@ -997,7 +1046,7 @@ int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
struct mmc_host *host = card->host;
int err;
- if (!mmc_can_sanitize(card)) {
+ if (!mmc_card_can_sanitize(card)) {
pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
return -EOPNOTSUPP;
}
@@ -1028,3 +1077,75 @@ int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);
+
+/**
+ * mmc_read_tuning() - read data blocks from the mmc
+ * @host: mmc host doing the read
+ * @blksz: data block size
+ * @blocks: number of blocks to read
+ *
+ * Read one or more blocks of data from the beginning of the mmc. This is a
+ * low-level helper for tuning operations. It is assumed that CMD23 can be used
+ * for a multi-block read if the host supports it.
+ *
+ * Note: A temporary buffer is allocated and freed internally to hold the data
+ * read. The data itself is not available outside of the function, only the
+ * status of the read operation.
+ *
+ * Return: 0 in case of success, otherwise -EIO / -ENOMEM / -E2BIG
+ */
+int mmc_read_tuning(struct mmc_host *host, unsigned int blksz, unsigned int blocks)
+{
+ struct mmc_request mrq = {};
+ struct mmc_command sbc = {};
+ struct mmc_command cmd = {};
+ struct mmc_command stop = {};
+ struct mmc_data data = {};
+ struct scatterlist sg;
+ void *buf;
+ unsigned int len;
+
+ if (blocks > 1) {
+ if (mmc_host_can_cmd23(host)) {
+ mrq.sbc = &sbc;
+ sbc.opcode = MMC_SET_BLOCK_COUNT;
+ sbc.arg = blocks;
+ sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ }
+ cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
+ mrq.stop = &stop;
+ stop.opcode = MMC_STOP_TRANSMISSION;
+ stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ cmd.opcode = MMC_READ_SINGLE_BLOCK;
+ }
+
+ mrq.cmd = &cmd;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ mrq.data = &data;
+ data.flags = MMC_DATA_READ;
+ data.blksz = blksz;
+ data.blocks = blocks;
+ data.blk_addr = 0;
+ data.sg = &sg;
+ data.sg_len = 1;
+ data.timeout_ns = 1000000000;
+
+ if (check_mul_overflow(blksz, blocks, &len))
+ return -E2BIG;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ sg_init_one(&sg, buf, len);
+
+ mmc_wait_for_req(host, &mrq);
+ kfree(buf);
+
+ if (sbc.error || cmd.error || data.error)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_read_tuning);
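mmc_read_tuning() is intended to be called from a host driver's tuning path once a candidate sampling setting has been applied; only the success of the read matters. A sketch, assuming a hypothetical driver that steps through delay-line phases via a foo_set_sample_phase() helper:

	static int foo_execute_tuning(struct mmc_host *host, u32 opcode)
	{
		int phase, best = -1;

		for (phase = 0; phase < 16; phase++) {
			foo_set_sample_phase(host, phase);	/* hypothetical helper */

			/* Read 4 blocks of 512 bytes from the start of the card. */
			if (!mmc_read_tuning(host, 512, 4))
				best = phase;
		}

		if (best < 0)
			return -EIO;

		foo_set_sample_phase(host, best);
		return 0;
	}

A real driver would typically pick the centre of the largest passing window rather than the last passing phase; the loop above only illustrates the calling convention.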
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 41ab4f573a31..514c40ff4b4e 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -15,6 +15,7 @@ enum mmc_busy_cmd {
MMC_BUSY_ERASE,
MMC_BUSY_HPI,
MMC_BUSY_EXTR_SINGLE,
+ MMC_BUSY_IO,
};
struct mmc_host;
@@ -24,6 +25,7 @@ struct mmc_command;
int mmc_select_card(struct mmc_card *card);
int mmc_deselect_cards(struct mmc_host *host);
int mmc_set_dsr(struct mmc_host *host);
+int __mmc_go_idle(struct mmc_host *host);
int mmc_go_idle(struct mmc_host *host);
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
int mmc_set_relative_addr(struct mmc_card *card);
@@ -31,17 +33,16 @@ int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
u32 args, void *buf, unsigned len);
int mmc_send_csd(struct mmc_card *card, u32 *csd);
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries);
-int mmc_send_status(struct mmc_card *card, u32 *status);
int mmc_send_cid(struct mmc_host *host, u32 *cid);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
-int mmc_can_ext_csd(struct mmc_card *card);
-int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
+bool mmc_card_can_ext_csd(struct mmc_card *card);
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
unsigned int timeout_ms);
-int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
+ unsigned int timeout_ms,
int (*busy_cb)(void *cb_data, bool *busy),
void *cb_data);
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
@@ -56,5 +57,19 @@ int mmc_cmdq_enable(struct mmc_card *card);
int mmc_cmdq_disable(struct mmc_card *card);
int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms);
+static inline u32 unstuff_bits(const u32 *resp, int start, int size)
+{
+ const int __size = size;
+ const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1;
+ const int __off = 3 - (start / 32);
+ const int __shft = start & 31;
+ u32 __res = resp[__off] >> __shft;
+
+ if (__size + __shft > 32)
+ __res |= resp[__off - 1] << ((32 - __shft) % 32);
+
+ return __res & __mask;
+}
+
#endif
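unstuff_bits() extracts a bit-field from a 128-bit response stored as four u32 words, with resp[0] holding bits 127:96 and resp[3] holding bits 31:0. For instance, the fields decoded by mmc_decode_cid() above for MMC v2.0+ cards can be pulled out like this:

	u32 raw_cid[4];	/* raw_cid[0] = CID bits 127:96, ..., raw_cid[3] = bits 31:0 */

	/* Same extraction mmc_decode_cid() performs for MMC v2.0+ cards: */
	u8  manfid = unstuff_bits(raw_cid, 120, 8);	/* CID[127:120] */
	u32 serial = unstuff_bits(raw_cid, 16, 32);	/* CID[47:16]   */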
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 63524551a13a..01d1e62c2ce7 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -10,7 +10,6 @@
#include <linux/slab.h>
#include <linux/scatterlist.h>
-#include <linux/swap.h> /* For nr_free_buffer_pages() */
#include <linux/list.h>
#include <linux/debugfs.h>
@@ -181,20 +180,14 @@ static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
return mmc_set_blocklen(test->card, size);
}
-static bool mmc_test_card_cmd23(struct mmc_card *card)
-{
- return mmc_card_mmc(card) ||
- (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
-}
-
static void mmc_test_prepare_sbc(struct mmc_test_card *test,
struct mmc_request *mrq, unsigned int blocks)
{
struct mmc_card *card = test->card;
- if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
- !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
- (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
+ if (!mrq->sbc || !mmc_host_can_cmd23(card->host) ||
+ !mmc_card_can_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
+ mmc_card_blk_no_cmd23(card)) {
mrq->sbc = NULL;
return;
}
@@ -593,14 +586,11 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
rate = mmc_test_rate(tot, &ts);
iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
- pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
- "%llu.%09u seconds (%u kB/s, %u KiB/s, "
- "%u.%02u IOPS, sg_len %d)\n",
- mmc_hostname(test->card->host), count, sectors, count,
- sectors >> 1, (sectors & 1 ? ".5" : ""),
- (u64)ts.tv_sec, (u32)ts.tv_nsec,
- rate / 1000, rate / 1024, iops / 100, iops % 100,
- test->area.sg_len);
+ pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took %ptSp seconds (%u kB/s, %u KiB/s, %u.%02u IOPS, sg_len %d)\n",
+ mmc_hostname(test->card->host), count, sectors, count,
+ sectors >> 1, (sectors & 1 ? ".5" : ""), &ts,
+ rate / 1000, rate / 1024, iops / 100, iops % 100,
+ test->area.sg_len);
mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}
@@ -933,7 +923,6 @@ static int mmc_test_transfer(struct mmc_test_card *test,
unsigned blocks, unsigned blksz, int write)
{
int ret, i;
- unsigned long flags;
if (write) {
for (i = 0; i < blocks * blksz; i++)
@@ -941,9 +930,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
} else {
memset(test->scratch, 0, BUFFER_SIZE);
}
- local_irq_save(flags);
sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
- local_irq_restore(flags);
ret = mmc_test_set_blksize(test, blksz);
if (ret)
@@ -988,9 +975,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
return RESULT_FAIL;
}
} else {
- local_irq_save(flags);
sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
- local_irq_restore(flags);
for (i = 0; i < blocks * blksz; i++) {
if (test->scratch[i] != (u8)i)
return RESULT_FAIL;
@@ -1516,7 +1501,7 @@ static int mmc_test_area_erase(struct mmc_test_card *test)
{
struct mmc_test_area *t = &test->area;
- if (!mmc_can_erase(test->card))
+ if (!mmc_card_can_erase(test->card))
return 0;
return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
@@ -1752,10 +1737,10 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
struct timespec64 ts1, ts2;
int ret;
- if (!mmc_can_trim(test->card))
+ if (!mmc_card_can_trim(test->card))
return RESULT_UNSUP_CARD;
- if (!mmc_can_erase(test->card))
+ if (!mmc_card_can_erase(test->card))
return RESULT_UNSUP_HOST;
for (sz = 512; sz < t->max_sz; sz <<= 1) {
@@ -1869,10 +1854,10 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
struct timespec64 ts1, ts2;
int ret;
- if (!mmc_can_trim(test->card))
+ if (!mmc_card_can_trim(test->card))
return RESULT_UNSUP_CARD;
- if (!mmc_can_erase(test->card))
+ if (!mmc_card_can_erase(test->card))
return RESULT_UNSUP_HOST;
for (sz = 512; sz <= t->max_sz; sz <<= 1) {
@@ -1910,7 +1895,7 @@ static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
}
static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
- unsigned long sz)
+ unsigned long sz, int secs, int force_retuning)
{
unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
unsigned int ssz;
@@ -1927,7 +1912,7 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
for (cnt = 0; cnt < UINT_MAX; cnt++) {
ktime_get_ts64(&ts2);
ts = timespec64_sub(ts2, ts1);
- if (ts.tv_sec >= 10)
+ if (ts.tv_sec >= secs)
break;
ea = mmc_test_rnd_num(range1);
if (ea == last_ea)
@@ -1935,6 +1920,8 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
last_ea = ea;
dev_addr = rnd_addr + test->card->pref_erase * ea +
ssz * mmc_test_rnd_num(range2);
+ if (force_retuning)
+ mmc_retune_needed(test->card->host);
ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
if (ret)
return ret;
@@ -1959,24 +1946,35 @@ static int mmc_test_random_perf(struct mmc_test_card *test, int write)
*/
if (write) {
next = rnd_next;
- ret = mmc_test_rnd_perf(test, write, 0, sz);
+ ret = mmc_test_rnd_perf(test, write, 0, sz, 10, 0);
if (ret)
return ret;
rnd_next = next;
}
- ret = mmc_test_rnd_perf(test, write, 1, sz);
+ ret = mmc_test_rnd_perf(test, write, 1, sz, 10, 0);
if (ret)
return ret;
}
sz = t->max_tfr;
if (write) {
next = rnd_next;
- ret = mmc_test_rnd_perf(test, write, 0, sz);
+ ret = mmc_test_rnd_perf(test, write, 0, sz, 10, 0);
if (ret)
return ret;
rnd_next = next;
}
- return mmc_test_rnd_perf(test, write, 1, sz);
+ return mmc_test_rnd_perf(test, write, 1, sz, 10, 0);
+}
+
+static int mmc_test_retuning(struct mmc_test_card *test)
+{
+ if (!mmc_can_retune(test->card->host)) {
+ pr_info("%s: No retuning - test skipped\n",
+ mmc_hostname(test->card->host));
+ return RESULT_UNSUP_HOST;
+ }
+
+ return mmc_test_rnd_perf(test, 0, 0, 8192, 30, 1);
}
/*
@@ -2107,7 +2105,7 @@ static int mmc_test_rw_multiple(struct mmc_test_card *test,
return 0;
/* prepare test area */
- if (mmc_can_erase(test->card) &&
+ if (mmc_card_can_erase(test->card) &&
tdata->prepare & MMC_TEST_PREP_ERASE) {
ret = mmc_erase(test->card, dev_addr,
size / 512, test->card->erase_arg);
@@ -2326,10 +2324,9 @@ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
static int mmc_test_reset(struct mmc_test_card *test)
{
struct mmc_card *card = test->card;
- struct mmc_host *host = card->host;
int err;
- err = mmc_hw_reset(host);
+ err = mmc_hw_reset(card);
if (!err) {
/*
* Reset will re-enable the card's command queue, but tests
@@ -2384,7 +2381,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
512, write);
if (use_sbc && t->blocks > 1 && !mrq->sbc) {
- ret = mmc_host_cmd23(host) ?
+ ret = mmc_host_can_cmd23(host) ?
RESULT_UNSUP_CARD :
RESULT_UNSUP_HOST;
goto out_free;
@@ -2928,6 +2925,14 @@ static const struct mmc_test_case mmc_test_cases[] = {
.run = mmc_test_cmds_during_write_cmd23_nonblock,
.cleanup = mmc_test_area_cleanup,
},
+
+ {
+ .name = "Re-tuning reliability",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_retuning,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
};
static DEFINE_MUTEX(mmc_test_lock);
@@ -3052,7 +3057,7 @@ static LIST_HEAD(mmc_test_file_test);
static int mtf_test_show(struct seq_file *sf, void *data)
{
- struct mmc_card *card = (struct mmc_card *)sf->private;
+ struct mmc_card *card = sf->private;
struct mmc_test_general_result *gr;
mutex_lock(&mmc_test_lock);
@@ -3066,10 +3071,9 @@ static int mtf_test_show(struct seq_file *sf, void *data)
seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
list_for_each_entry(tr, &gr->tr_lst, link) {
- seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
- tr->count, tr->sectors,
- (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec,
- tr->rate, tr->iops / 100, tr->iops % 100);
+ seq_printf(sf, "%u %d %ptSp %u %u.%02u\n",
+ tr->count, tr->sectors, &tr->ts, tr->rate,
+ tr->iops / 100, tr->iops % 100);
}
}
@@ -3086,8 +3090,8 @@ static int mtf_test_open(struct inode *inode, struct file *file)
static ssize_t mtf_test_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct seq_file *sf = (struct seq_file *)file->private_data;
- struct mmc_card *card = (struct mmc_card *)sf->private;
+ struct seq_file *sf = file->private_data;
+ struct mmc_card *card = sf->private;
struct mmc_test_card *test;
long testcase;
int ret;
@@ -3111,13 +3115,13 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
+ if (!test->highmem) {
+ count = -ENOMEM;
+ goto free_test_buffer;
+ }
#endif
-#ifdef CONFIG_HIGHMEM
- if (test->buffer && test->highmem) {
-#else
if (test->buffer) {
-#endif
mutex_lock(&mmc_test_lock);
mmc_test_run(test, testcase);
mutex_unlock(&mmc_test_lock);
@@ -3125,6 +3129,7 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
#ifdef CONFIG_HIGHMEM
__free_pages(test->highmem, BUFFER_ORDER);
+free_test_buffer:
#endif
kfree(test->buffer);
kfree(test);
@@ -3181,7 +3186,8 @@ static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
struct mmc_test_dbgfs_file *df;
if (card->debugfs_root)
- debugfs_create_file(name, mode, card->debugfs_root, card, fops);
+ file = debugfs_create_file(name, mode, card->debugfs_root,
+ card, fops);
df = kmalloc(sizeof(*df), GFP_KERNEL);
if (!df) {
@@ -3202,12 +3208,12 @@ static int mmc_test_register_dbgfs_file(struct mmc_card *card)
mutex_lock(&mmc_test_lock);
- ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
+ ret = __mmc_test_register_dbgfs_file(card, "test", 0644,
&mmc_test_fops_test);
if (ret)
goto err;
- ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
+ ret = __mmc_test_register_dbgfs_file(card, "testlist", 0444,
&mtf_testlist_fops);
if (ret)
goto err;
@@ -3225,6 +3231,12 @@ static int mmc_test_probe(struct mmc_card *card)
if (!mmc_card_mmc(card) && !mmc_card_sd(card))
return -ENODEV;
+ if (mmc_card_ult_capacity(card)) {
+ pr_info("%s: mmc-test currently UNSUPPORTED for SDUC\n",
+ mmc_hostname(card->host));
+ return -EOPNOTSUPP;
+ }
+
ret = mmc_test_register_dbgfs_file(card);
if (ret)
return ret;
diff --git a/drivers/mmc/core/pwrseq.c b/drivers/mmc/core/pwrseq.c
index ef675f364bf0..2374669b588a 100644
--- a/drivers/mmc/core/pwrseq.c
+++ b/drivers/mmc/core/pwrseq.c
@@ -29,7 +29,7 @@ int mmc_pwrseq_alloc(struct mmc_host *host)
mutex_lock(&pwrseq_list_mutex);
list_for_each_entry(p, &pwrseq_list, pwrseq_node) {
- if (p->dev->of_node == np) {
+ if (device_match_of_node(p->dev, np)) {
if (!try_module_get(p->owner))
dev_err(host->parent,
"increasing module refcount failed\n");
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
index f6dde9edd7a3..35af67e26945 100644
--- a/drivers/mmc/core/pwrseq_emmc.c
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -90,14 +90,12 @@ static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
return mmc_pwrseq_register(&pwrseq->pwrseq);
}
-static int mmc_pwrseq_emmc_remove(struct platform_device *pdev)
+static void mmc_pwrseq_emmc_remove(struct platform_device *pdev)
{
struct mmc_pwrseq_emmc *pwrseq = platform_get_drvdata(pdev);
unregister_restart_handler(&pwrseq->reset_nb);
mmc_pwrseq_unregister(&pwrseq->pwrseq);
-
- return 0;
}
static const struct of_device_id mmc_pwrseq_emmc_of_match[] = {
@@ -117,4 +115,5 @@ static struct platform_driver mmc_pwrseq_emmc_driver = {
};
module_platform_driver(mmc_pwrseq_emmc_driver);
+MODULE_DESCRIPTION("Hardware reset support for eMMC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/pwrseq_sd8787.c b/drivers/mmc/core/pwrseq_sd8787.c
index 68a826f1c0a1..30282155a0e1 100644
--- a/drivers/mmc/core/pwrseq_sd8787.c
+++ b/drivers/mmc/core/pwrseq_sd8787.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -49,13 +50,37 @@ static void mmc_pwrseq_sd8787_power_off(struct mmc_host *host)
gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
}
+static void mmc_pwrseq_wilc1000_pre_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
+
+ /* The pwrdn_gpio is really CHIP_EN, reset_gpio is RESETN */
+ gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1);
+ msleep(5);
+ gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
+}
+
+static void mmc_pwrseq_wilc1000_power_off(struct mmc_host *host)
+{
+ struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
+
+ gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
+ gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 0);
+}
+
static const struct mmc_pwrseq_ops mmc_pwrseq_sd8787_ops = {
.pre_power_on = mmc_pwrseq_sd8787_pre_power_on,
.power_off = mmc_pwrseq_sd8787_power_off,
};
+static const struct mmc_pwrseq_ops mmc_pwrseq_wilc1000_ops = {
+ .pre_power_on = mmc_pwrseq_wilc1000_pre_power_on,
+ .power_off = mmc_pwrseq_wilc1000_power_off,
+};
+
static const struct of_device_id mmc_pwrseq_sd8787_of_match[] = {
- { .compatible = "mmc-pwrseq-sd8787",},
+ { .compatible = "mmc-pwrseq-sd8787", .data = &mmc_pwrseq_sd8787_ops },
+ { .compatible = "mmc-pwrseq-wilc1000", .data = &mmc_pwrseq_wilc1000_ops },
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, mmc_pwrseq_sd8787_of_match);
@@ -64,11 +89,14 @@ static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev)
{
struct mmc_pwrseq_sd8787 *pwrseq;
struct device *dev = &pdev->dev;
+ const struct of_device_id *match;
pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
if (!pwrseq)
return -ENOMEM;
+ match = of_match_node(mmc_pwrseq_sd8787_of_match, pdev->dev.of_node);
+
pwrseq->pwrdn_gpio = devm_gpiod_get(dev, "powerdown", GPIOD_OUT_LOW);
if (IS_ERR(pwrseq->pwrdn_gpio))
return PTR_ERR(pwrseq->pwrdn_gpio);
@@ -78,20 +106,18 @@ static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev)
return PTR_ERR(pwrseq->reset_gpio);
pwrseq->pwrseq.dev = dev;
- pwrseq->pwrseq.ops = &mmc_pwrseq_sd8787_ops;
+ pwrseq->pwrseq.ops = match->data;
pwrseq->pwrseq.owner = THIS_MODULE;
platform_set_drvdata(pdev, pwrseq);
return mmc_pwrseq_register(&pwrseq->pwrseq);
}
-static int mmc_pwrseq_sd8787_remove(struct platform_device *pdev)
+static void mmc_pwrseq_sd8787_remove(struct platform_device *pdev)
{
struct mmc_pwrseq_sd8787 *pwrseq = platform_get_drvdata(pdev);
mmc_pwrseq_unregister(&pwrseq->pwrseq);
-
- return 0;
}
static struct platform_driver mmc_pwrseq_sd8787_driver = {
@@ -104,4 +130,5 @@ static struct platform_driver mmc_pwrseq_sd8787_driver = {
};
module_platform_driver(mmc_pwrseq_sd8787_driver);
+MODULE_DESCRIPTION("Power sequence support for Marvell SD8787 BT + Wifi chip");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index ea4d3670560e..4b47e6c3b04b 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -17,6 +17,8 @@
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/property.h>
+#include <linux/of.h>
+#include <linux/reset.h>
#include <linux/mmc/host.h>
@@ -29,6 +31,7 @@ struct mmc_pwrseq_simple {
u32 power_off_delay_us;
struct clk *ext_clk;
struct gpio_descs *reset_gpios;
+ struct reset_control *reset_ctrl;
};
#define to_pwrseq_simple(p) container_of(p, struct mmc_pwrseq_simple, pwrseq)
@@ -51,10 +54,9 @@ static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
else
bitmap_zero(values, nvalues);
- gpiod_set_array_value_cansleep(nvalues, reset_gpios->desc,
- reset_gpios->info, values);
+ gpiod_multi_set_value_cansleep(reset_gpios, values);
- kfree(values);
+ bitmap_free(values);
}
}
@@ -67,14 +69,21 @@ static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
pwrseq->clk_enabled = true;
}
- mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
+ if (pwrseq->reset_ctrl) {
+ reset_control_deassert(pwrseq->reset_ctrl);
+ reset_control_assert(pwrseq->reset_ctrl);
+ } else
+ mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
}
static void mmc_pwrseq_simple_post_power_on(struct mmc_host *host)
{
struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
- mmc_pwrseq_simple_set_gpios_value(pwrseq, 0);
+ if (pwrseq->reset_ctrl)
+ reset_control_deassert(pwrseq->reset_ctrl);
+ else
+ mmc_pwrseq_simple_set_gpios_value(pwrseq, 0);
if (pwrseq->post_power_on_delay_ms)
msleep(pwrseq->post_power_on_delay_ms);
@@ -84,7 +93,10 @@ static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
{
struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
- mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
+ if (pwrseq->reset_ctrl)
+ reset_control_assert(pwrseq->reset_ctrl);
+ else
+ mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
if (pwrseq->power_off_delay_us)
usleep_range(pwrseq->power_off_delay_us,
@@ -112,6 +124,7 @@ static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
{
struct mmc_pwrseq_simple *pwrseq;
struct device *dev = &pdev->dev;
+ int ngpio;
pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
if (!pwrseq)
@@ -119,14 +132,28 @@ static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
pwrseq->ext_clk = devm_clk_get(dev, "ext_clock");
if (IS_ERR(pwrseq->ext_clk) && PTR_ERR(pwrseq->ext_clk) != -ENOENT)
- return PTR_ERR(pwrseq->ext_clk);
-
- pwrseq->reset_gpios = devm_gpiod_get_array(dev, "reset",
- GPIOD_OUT_HIGH);
- if (IS_ERR(pwrseq->reset_gpios) &&
- PTR_ERR(pwrseq->reset_gpios) != -ENOENT &&
- PTR_ERR(pwrseq->reset_gpios) != -ENOSYS) {
- return PTR_ERR(pwrseq->reset_gpios);
+ return dev_err_probe(dev, PTR_ERR(pwrseq->ext_clk), "external clock not ready\n");
+
+ ngpio = of_count_phandle_with_args(dev->of_node, "reset-gpios", "#gpio-cells");
+ if (ngpio == 1) {
+ pwrseq->reset_ctrl = devm_reset_control_get_optional_shared(dev, NULL);
+ if (IS_ERR(pwrseq->reset_ctrl))
+ return dev_err_probe(dev, PTR_ERR(pwrseq->reset_ctrl),
+ "reset control not ready\n");
+ }
+
+ /*
+ * Fall back to GPIO based reset control in case multiple reset lines
+ * are specified or the platform doesn't support RESET at all.
+ */
+ if (!pwrseq->reset_ctrl) {
+ pwrseq->reset_gpios = devm_gpiod_get_array(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(pwrseq->reset_gpios) &&
+ PTR_ERR(pwrseq->reset_gpios) != -ENOENT &&
+ PTR_ERR(pwrseq->reset_gpios) != -ENOSYS) {
+ return dev_err_probe(dev, PTR_ERR(pwrseq->reset_gpios),
+ "reset GPIOs not ready\n");
+ }
}
device_property_read_u32(dev, "post-power-on-delay-ms",
@@ -142,13 +169,11 @@ static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
return mmc_pwrseq_register(&pwrseq->pwrseq);
}
-static int mmc_pwrseq_simple_remove(struct platform_device *pdev)
+static void mmc_pwrseq_simple_remove(struct platform_device *pdev)
{
struct mmc_pwrseq_simple *pwrseq = platform_get_drvdata(pdev);
mmc_pwrseq_unregister(&pwrseq->pwrseq);
-
- return 0;
}
static struct platform_driver mmc_pwrseq_simple_driver = {
@@ -161,4 +186,5 @@ static struct platform_driver mmc_pwrseq_simple_driver = {
};
module_platform_driver(mmc_pwrseq_simple_driver);
+MODULE_DESCRIPTION("Simple power sequence management for MMC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index cc3261777637..284856c8f655 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -48,6 +48,7 @@ static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
case REQ_OP_DRV_OUT:
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
+ case REQ_OP_WRITE_ZEROES:
return MMC_ISSUE_SYNC;
case REQ_OP_FLUSH:
return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
@@ -116,8 +117,7 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
}
}
-static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
- bool reserved)
+static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req)
{
struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
@@ -163,7 +163,7 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
blk_mq_run_hw_queues(q, true);
}
-static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
+static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp)
{
struct scatterlist *sg;
@@ -174,8 +174,8 @@ static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
return sg;
}
-static void mmc_queue_setup_discard(struct request_queue *q,
- struct mmc_card *card)
+static void mmc_queue_setup_discard(struct mmc_card *card,
+ struct queue_limits *lim)
{
unsigned max_discard;
@@ -183,43 +183,42 @@ static void mmc_queue_setup_discard(struct request_queue *q,
if (!max_discard)
return;
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
- blk_queue_max_discard_sectors(q, max_discard);
- q->limits.discard_granularity = card->pref_erase << 9;
+ lim->max_hw_discard_sectors = max_discard;
+ if (mmc_card_can_secure_erase_trim(card))
+ lim->max_secure_erase_sectors = max_discard;
+ if (mmc_card_can_trim(card) && card->erased_byte == 0)
+ lim->max_write_zeroes_sectors = max_discard;
+
/* granularity must not be greater than max. discard */
if (card->pref_erase > max_discard)
- q->limits.discard_granularity = SECTOR_SIZE;
- if (mmc_can_secure_erase_trim(card))
- blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
+ lim->discard_granularity = SECTOR_SIZE;
+ else
+ lim->discard_granularity = card->pref_erase << 9;
}
-static unsigned int mmc_get_max_segments(struct mmc_host *host)
+static unsigned short mmc_get_max_segments(struct mmc_host *host)
{
return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
host->max_segs;
}
-/**
- * mmc_init_request() - initialize the MMC-specific per-request data
- * @mq: the request queue
- * @req: the request
- * @gfp: memory allocation policy
- */
-static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
- gfp_t gfp)
+static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx, unsigned int numa_node)
{
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
+ struct mmc_queue *mq = set->driver_data;
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
- mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
+ mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), GFP_KERNEL);
if (!mq_rq->sg)
return -ENOMEM;
return 0;
}
-static void mmc_exit_request(struct request_queue *q, struct request *req)
+static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx)
{
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
@@ -227,20 +226,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
mq_rq->sg = NULL;
}
-static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
- unsigned int hctx_idx, unsigned int numa_node)
-{
- return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
-}
-
-static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
- unsigned int hctx_idx)
-{
- struct mmc_queue *mq = set->driver_data;
-
- mmc_exit_request(mq->queue, req);
-}
-
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -252,7 +237,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
enum mmc_issue_type issue_type;
enum mmc_issued issued;
bool get_card, cqe_retune_ok;
- int ret;
+ blk_status_t ret;
if (mmc_card_removed(mq->card)) {
req->rq_flags |= RQF_QUIET;
@@ -277,11 +262,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
}
break;
case MMC_ISSUE_ASYNC:
- /*
- * For MMC host software queue, we only allow 2 requests in
- * flight to avoid a long latency.
- */
- if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
+ if (host->hsq_enabled && mq->in_flight[issue_type] > host->hsq_depth) {
spin_unlock_irq(&mq->lock);
return BLK_STS_RESOURCE;
}
@@ -362,42 +343,53 @@ static const struct blk_mq_ops mmc_mq_ops = {
.timeout = mmc_mq_timed_out,
};
-static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq,
+ struct mmc_card *card, unsigned int features)
{
struct mmc_host *host = card->host;
- unsigned block_size = 512;
-
- blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
- if (mmc_can_erase(card))
- mmc_queue_setup_discard(mq->queue, card);
-
- if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
- blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
- blk_queue_max_hw_sectors(mq->queue,
- min(host->max_blk_count, host->max_req_size / 512));
- if (host->can_dma_map_merge)
- WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
- mmc_dev(host)),
- "merging was advertised but not possible");
- blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
-
- if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
- block_size = card->ext_csd.data_sector_size;
- WARN_ON(block_size != 512 && block_size != 4096);
- }
+ struct queue_limits lim = {
+ .features = features,
+ };
+ struct gendisk *disk;
+
+ if (mmc_card_can_erase(card))
+ mmc_queue_setup_discard(card, &lim);
+
+ lim.max_hw_sectors = min(host->max_blk_count, host->max_req_size / 512);
+
+ if (mmc_card_mmc(card) && card->ext_csd.data_sector_size)
+ lim.logical_block_size = card->ext_csd.data_sector_size;
+ else
+ lim.logical_block_size = 512;
+
+ WARN_ON_ONCE(lim.logical_block_size != 512 &&
+ lim.logical_block_size != 4096);
- blk_queue_logical_block_size(mq->queue, block_size);
/*
- * After blk_queue_can_use_dma_map_merging() was called with succeed,
- * since it calls blk_queue_virt_boundary(), the mmc should not call
- * both blk_queue_max_segment_size().
+ * Setting a virt_boundary implicitly sets a max_segment_size, so try
+ * to set the hardware one here.
*/
- if (!host->can_dma_map_merge)
- blk_queue_max_segment_size(mq->queue,
- round_down(host->max_seg_size, block_size));
+ if (host->can_dma_map_merge) {
+ lim.virt_boundary_mask = dma_get_merge_boundary(mmc_dev(host));
+ lim.max_segments = MMC_DMA_MAP_MERGE_SEGMENTS;
+ } else {
+ lim.max_segment_size =
+ round_down(host->max_seg_size, lim.logical_block_size);
+ lim.max_segments = host->max_segs;
+ }
- dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
+ if (mmc_host_is_spi(host) && host->use_spi_crc)
+ lim.features |= BLK_FEAT_STABLE_WRITES;
+
+ disk = blk_mq_alloc_disk(&mq->tag_set, &lim, mq);
+ if (IS_ERR(disk))
+ return disk;
+ mq->queue = disk->queue;
+
+ blk_queue_rq_timeout(mq->queue, 60 * HZ);
+
+ if (mmc_dev(host)->dma_parms)
+ dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
@@ -407,6 +399,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
init_waitqueue_head(&mq->wait);
mmc_crypto_setup_queue(mq->queue, host);
+ return disk;
}
static inline bool mmc_merge_capable(struct mmc_host *host)
@@ -421,10 +414,12 @@ static inline bool mmc_merge_capable(struct mmc_host *host)
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
* @card: mmc card to attach this queue
+ * @features: block layer features (BLK_FEAT_*)
*
* Initialise a MMC card request queue.
*/
-struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
+struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+ unsigned int features)
{
struct mmc_host *host = card->host;
struct gendisk *disk;
@@ -446,7 +441,7 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
else
mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
mq->tag_set.numa_node = NUMA_NO_NODE;
- mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ mq->tag_set.flags = BLK_MQ_F_BLOCKING;
mq->tag_set.nr_hw_queues = 1;
mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
mq->tag_set.driver_data = mq;
@@ -468,18 +463,9 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
return ERR_PTR(ret);
- disk = blk_mq_alloc_disk(&mq->tag_set, mq);
- if (IS_ERR(disk)) {
+ disk = mmc_alloc_disk(mq, card, features);
+ if (IS_ERR(disk))
blk_mq_free_tag_set(&mq->tag_set);
- return disk;
- }
- mq->queue = disk->queue;
-
- if (mmc_host_is_spi(host) && host->use_spi_crc)
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
- blk_queue_rq_timeout(mq->queue, 60 * HZ);
-
- mmc_setup_queue(mq, card);
return disk;
}
@@ -511,7 +497,13 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
if (blk_queue_quiesced(q))
blk_mq_unquiesce_queue(q);
- blk_cleanup_queue(q);
+ /*
+ * If the recovery completes the last (and only remaining) request in
+ * the queue, and the card has been removed, we could end up here with
+ * the recovery not quite finished yet, so cancel it.
+ */
+ cancel_work_sync(&mq->recovery_work);
+
blk_mq_free_tag_set(&mq->tag_set);
/*
@@ -531,5 +523,5 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
struct request *req = mmc_queue_req_to_req(mqrq);
- return blk_rq_map_sg(mq->queue, req, mqrq->sg);
+ return blk_rq_map_sg(req, mqrq->sg);
}
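
Editorial aside on the queue_limits conversion above: rather than tweaking the request queue with individual blk_queue_* setters after the disk exists, the limits are now collected in a struct queue_limits and handed to blk_mq_alloc_disk() up front, exactly as mmc_alloc_disk() does. A minimal sketch of the pattern; "my_tag_set" and "my_data" are placeholder names, not part of the patch:

	static struct gendisk *example_alloc_disk(struct blk_mq_tag_set *my_tag_set,
						  void *my_data)
	{
		struct queue_limits lim = {
			.logical_block_size	= 512,
			.max_hw_sectors		= 1024,	/* illustrative value */
		};

		/* All limits are applied atomically when the disk is created. */
		return blk_mq_alloc_disk(my_tag_set, &lim, my_data);
	}
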
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 9ade3bcbb714..1498840a4ea0 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -94,7 +94,8 @@ struct mmc_queue {
struct work_struct complete_work;
};
-struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card);
+struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+ unsigned int features);
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index d68e6e513a4f..c417ed34c057 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -10,10 +10,49 @@
*
*/
+#include <linux/of.h>
#include <linux/mmc/sdio_ids.h>
#include "card.h"
+static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = {
+ /*
+ * Kingston Canvas Go! Plus microSD cards never finish SD cache flush.
+ * This has so far only been observed on cards from 11/2019, while new
+ * cards from 2023/05 do not exhibit this behavior.
+ */
+ _FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11,
+ 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
+
+ /*
+ * GIGASTONE Gaming Plus microSD cards manufactured on 02/2022 never
+ * clear the Flush Cache bit nor set the Poweroff Notification Ready bit.
+ */
+ _FIXUP_EXT("ASTC", CID_MANFID_GIGASTONE, 0x3456, 2022, 2,
+ 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_CACHE | MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY,
+ EXT_CSD_REV_ANY),
+
+ /*
+ * Swissbit series S46-u cards throw I/O errors during tuning requests
+ * after the initial tuning request expectedly times out. This has
+ * only been observed on cards manufactured on 01/2019 that are using
+ * Bay Trail host controllers.
+ */
+ _FIXUP_EXT("0016G", CID_MANFID_SWISSBIT, 0x5342, 2019, 1,
+ 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+ MMC_QUIRK_NO_UHS_DDR50_TUNING, EXT_CSD_REV_ANY),
+
+ /*
+ * Some SD cards report discard support while they don't
+ */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_DISCARD),
+
+ END_FIXUP
+};
+
static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
#define INAND_CMD38_ARG_EXT_CSD 113
#define INAND_CMD38_ARG_ERASE 0x00
@@ -99,6 +138,21 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_TRIM_BROKEN),
+ /*
+ * Micron MTFC4GACAJCN-1M supports TRIM but does not appear to support
+ * WRITE_ZEROES offloading. It also supports caching, but the cache can
+ * only be flushed after a write has occurred.
+ */
+ MMC_FIXUP("Q2J54A", CID_MANFID_MICRON, 0x014e, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN | MMC_QUIRK_BROKEN_CACHE_FLUSH),
+
+ /*
+ * Kingston EMMC04G-M627 advertises TRIM but it does not seem to
+ * support being used to offload WRITE_ZEROES.
+ */
+ MMC_FIXUP("M62704", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+
END_FIXUP
};
@@ -145,6 +199,32 @@ static const struct mmc_fixup __maybe_unused sdio_fixup_methods[] = {
END_FIXUP
};
+static const struct mmc_fixup __maybe_unused sdio_card_init_methods[] = {
+ SDIO_FIXUP_COMPATIBLE("ti,wl1251", wl1251_quirk, 0),
+
+ SDIO_FIXUP_COMPATIBLE("silabs,wf200", add_quirk,
+ MMC_QUIRK_BROKEN_BYTE_MODE_512 |
+ MMC_QUIRK_LENIENT_FN0 |
+ MMC_QUIRK_BLKSZ_FOR_BYTE_MODE),
+
+ END_FIXUP
+};
+
+static inline bool mmc_fixup_of_compatible_match(struct mmc_card *card,
+ const char *compatible)
+{
+ struct device_node *np;
+
+ for_each_child_of_node(mmc_dev(card->host)->of_node, np) {
+ if (of_device_is_compatible(np, compatible)) {
+ of_node_put(np);
+ return true;
+ }
+ }
+
+ return false;
+}
+
static inline void mmc_fixup_device(struct mmc_card *card,
const struct mmc_fixup *table)
{
@@ -152,22 +232,36 @@ static inline void mmc_fixup_device(struct mmc_card *card,
u64 rev = cid_rev_card(card);
for (f = table; f->vendor_fixup; f++) {
- if ((f->manfid == CID_MANFID_ANY ||
- f->manfid == card->cid.manfid) &&
- (f->oemid == CID_OEMID_ANY ||
- f->oemid == card->cid.oemid) &&
- (f->name == CID_NAME_ANY ||
- !strncmp(f->name, card->cid.prod_name,
- sizeof(card->cid.prod_name))) &&
- (f->cis_vendor == card->cis.vendor ||
- f->cis_vendor == (u16) SDIO_ANY_ID) &&
- (f->cis_device == card->cis.device ||
- f->cis_device == (u16) SDIO_ANY_ID) &&
- (f->ext_csd_rev == EXT_CSD_REV_ANY ||
- f->ext_csd_rev == card->ext_csd.rev) &&
- rev >= f->rev_start && rev <= f->rev_end) {
- dev_dbg(&card->dev, "calling %ps\n", f->vendor_fixup);
- f->vendor_fixup(card, f->data);
- }
+ if (f->manfid != CID_MANFID_ANY &&
+ f->manfid != card->cid.manfid)
+ continue;
+ if (f->oemid != CID_OEMID_ANY &&
+ f->oemid != card->cid.oemid)
+ continue;
+ if (f->name != CID_NAME_ANY &&
+ strncmp(f->name, card->cid.prod_name,
+ sizeof(card->cid.prod_name)))
+ continue;
+ if (f->cis_vendor != (u16)SDIO_ANY_ID &&
+ f->cis_vendor != card->cis.vendor)
+ continue;
+ if (f->cis_device != (u16)SDIO_ANY_ID &&
+ f->cis_device != card->cis.device)
+ continue;
+ if (f->ext_csd_rev != EXT_CSD_REV_ANY &&
+ f->ext_csd_rev != card->ext_csd.rev)
+ continue;
+ if (rev < f->rev_start || rev > f->rev_end)
+ continue;
+ if (f->of_compatible &&
+ !mmc_fixup_of_compatible_match(card, f->of_compatible))
+ continue;
+ if (f->year != CID_YEAR_ANY && f->year != card->cid.year)
+ continue;
+ if (f->month != CID_MONTH_ANY && f->month != card->cid.month)
+ continue;
+
+ dev_dbg(&card->dev, "calling %ps\n", f->vendor_fixup);
+ f->vendor_fixup(card, f->data);
}
}
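
Reading aid for the quirk tables above (editorial, not part of the patch): as the entries show, _FIXUP_EXT() takes the CID product name, manufacturer ID, OEM ID, CID year and month, a revision range, the SDIO vendor/device IDs, the hook to call, its data, and finally the EXT_CSD revision. With the new year/month checks in mmc_fixup_device(), such an entry only fires for cards whose CID production date matches exactly. A purely hypothetical entry, mirroring that argument order:

	/* Hypothetical card, for illustration only. */
	_FIXUP_EXT("EXMPL", CID_MANFID_ANY, 0x1234, 2024, 6,
		   0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
		   MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
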
diff --git a/drivers/mmc/core/regulator.c b/drivers/mmc/core/regulator.c
index 609201a467ef..a85179f1a4de 100644
--- a/drivers/mmc/core/regulator.c
+++ b/drivers/mmc/core/regulator.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
#include <linux/mmc/host.h>
@@ -110,6 +111,9 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
int result = 0;
int min_uV, max_uV;
+ if (IS_ERR(supply))
+ return 0;
+
if (vdd_bit) {
mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
@@ -223,6 +227,33 @@ int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
+/**
+ * mmc_regulator_set_vqmmc2 - Set vqmmc2 as per the ios->vqmmc2_voltage
+ * @mmc: The mmc host to regulate
+ * @ios: The io bus settings
+ *
+ * Sets a new voltage level for the vqmmc2 regulator, which may correspond to
+ * the vdd2 regulator for an SD UHS-II interface. This function is expected to
+ * be called by mmc host drivers.
+ *
+ * Returns a negative error code on failure, zero if the voltage level was
+ * changed successfully or a positive value if the level didn't need to change.
+ */
+int mmc_regulator_set_vqmmc2(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ if (IS_ERR(mmc->supply.vqmmc2))
+ return -EINVAL;
+
+ switch (ios->vqmmc2_voltage) {
+ case MMC_VQMMC2_VOLTAGE_180:
+ return mmc_regulator_set_voltage_if_supported(
+ mmc->supply.vqmmc2, 1700000, 1800000, 1950000);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc2);
+
#else
static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
@@ -232,6 +263,82 @@ static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
#endif /* CONFIG_REGULATOR */
+/* To be called from a high-priority workqueue */
+void mmc_undervoltage_workfn(struct work_struct *work)
+{
+ struct mmc_supply *supply;
+ struct mmc_host *host;
+
+ supply = container_of(work, struct mmc_supply, uv_work);
+ host = container_of(supply, struct mmc_host, supply);
+
+ mmc_handle_undervoltage(host);
+}
+
+static int mmc_handle_regulator_event(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct mmc_supply *supply = container_of(nb, struct mmc_supply,
+ vmmc_nb);
+ struct mmc_host *host = container_of(supply, struct mmc_host, supply);
+ unsigned long flags;
+
+ switch (event) {
+ case REGULATOR_EVENT_UNDER_VOLTAGE:
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->undervoltage) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return NOTIFY_OK;
+ }
+
+ host->undervoltage = true;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ queue_work(system_highpri_wq, &host->supply.uv_work);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+/**
+ * mmc_regulator_register_undervoltage_notifier - Register for undervoltage
+ * events
+ * @host: MMC host
+ *
+ * To be called by a bus driver when a card supporting graceful shutdown
+ * is attached.
+ */
+void mmc_regulator_register_undervoltage_notifier(struct mmc_host *host)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(host->supply.vmmc))
+ return;
+
+ host->supply.vmmc_nb.notifier_call = mmc_handle_regulator_event;
+ ret = regulator_register_notifier(host->supply.vmmc,
+ &host->supply.vmmc_nb);
+ if (ret)
+ dev_warn(mmc_dev(host), "Failed to register vmmc notifier: %d\n", ret);
+}
+
+/**
+ * mmc_regulator_unregister_undervoltage_notifier - Unregister undervoltage
+ * notifier
+ * @host: MMC host
+ */
+void mmc_regulator_unregister_undervoltage_notifier(struct mmc_host *host)
+{
+ if (IS_ERR_OR_NULL(host->supply.vmmc))
+ return;
+
+ regulator_unregister_notifier(host->supply.vmmc, &host->supply.vmmc_nb);
+ cancel_work_sync(&host->supply.uv_work);
+}
+
/**
* mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host
* @mmc: the host to regulate
@@ -249,10 +356,13 @@ int mmc_regulator_get_supply(struct mmc_host *mmc)
mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
+ mmc->supply.vqmmc2 = devm_regulator_get_optional(dev, "vqmmc2");
if (IS_ERR(mmc->supply.vmmc)) {
if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "vmmc regulator not available\n");
+
dev_dbg(dev, "No vmmc regulator found\n");
} else {
ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
@@ -264,10 +374,59 @@ int mmc_regulator_get_supply(struct mmc_host *mmc)
if (IS_ERR(mmc->supply.vqmmc)) {
if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "vqmmc regulator not available\n");
+
dev_dbg(dev, "No vqmmc regulator found\n");
}
+ if (IS_ERR(mmc->supply.vqmmc2)) {
+ if (PTR_ERR(mmc->supply.vqmmc2) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_dbg(dev, "No vqmmc2 regulator found\n");
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
+
+/**
+ * mmc_regulator_enable_vqmmc - enable VQMMC regulator for a host
+ * @mmc: the host to regulate
+ *
+ * Returns 0 or errno. Enables the regulator for vqmmc.
+ * Keeps track of the enable status for ensuring that calls to
+ * regulator_enable/disable are balanced.
+ */
+int mmc_regulator_enable_vqmmc(struct mmc_host *mmc)
+{
+ int ret = 0;
+
+ if (!IS_ERR(mmc->supply.vqmmc) && !mmc->vqmmc_enabled) {
+ ret = regulator_enable(mmc->supply.vqmmc);
+ if (ret < 0)
+ dev_err(mmc_dev(mmc), "enabling vqmmc regulator failed\n");
+ else
+ mmc->vqmmc_enabled = true;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_enable_vqmmc);
+
+/**
+ * mmc_regulator_disable_vqmmc - disable VQMMC regulator for a host
+ * @mmc: the host to regulate
+ *
+ * Disables the regulator for vqmmc.
+ * Keeps track of the enable status for ensuring that calls to
+ * regulator_enable/disable are balanced.
+ */
+void mmc_regulator_disable_vqmmc(struct mmc_host *mmc)
+{
+ if (!IS_ERR(mmc->supply.vqmmc) && mmc->vqmmc_enabled) {
+ regulator_disable(mmc->supply.vqmmc);
+ mmc->vqmmc_enabled = false;
+ }
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_disable_vqmmc);
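
Editorial sketch of how the new undervoltage notifier is intended to be used by a bus driver, assuming host->supply.uv_work has been initialized with mmc_undervoltage_workfn() and that mmc_handle_undervoltage() is provided elsewhere in the series (neither is shown in this hunk):

	/* "my_sd_attach"/"my_sd_detach" are illustrative names only. */
	static int my_sd_attach(struct mmc_host *host)
	{
		/* Card supports graceful shutdown: watch vmmc for undervoltage. */
		mmc_regulator_register_undervoltage_notifier(host);
		return 0;
	}

	static void my_sd_detach(struct mmc_host *host)
	{
		/* Unhooks the notifier and flushes any pending uv_work. */
		mmc_regulator_unregister_undervoltage_notifier(host);
	}
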
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 4646b7a03db6..948948ca9b4a 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -11,7 +11,11 @@
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/string.h>
#include <linux/pm_runtime.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/sysfs.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -23,6 +27,7 @@
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
+#include "quirks.h"
#include "sd.h"
#include "sd_ops.h"
@@ -52,21 +57,7 @@ static const unsigned int sd_au_size[] = {
SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
};
-#define UNSTUFF_BITS(resp,start,size) \
- ({ \
- const int __size = size; \
- const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
- const int __off = 3 - ((start) / 32); \
- const int __shft = (start) & 31; \
- u32 __res; \
- \
- __res = resp[__off] >> __shft; \
- if (__size + __shft > 32) \
- __res |= resp[__off-1] << ((32 - __shft) % 32); \
- __res & __mask; \
- })
-
-#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 2000
+#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 1000
#define SD_WRITE_EXTR_SINGLE_TIMEOUT_MS 1000
struct sd_busy_data {
@@ -82,75 +73,85 @@ void mmc_decode_cid(struct mmc_card *card)
u32 *resp = card->raw_cid;
/*
+ * Add the raw card ID (cid) data to the entropy pool. It doesn't
+ * matter that not all of it is unique, it's just bonus entropy.
+ */
+ add_device_randomness(&card->raw_cid, sizeof(card->raw_cid));
+
+ /*
* SD doesn't currently have a version field so we will
* have to assume we can parse this.
*/
- card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
- card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
- card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
- card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
- card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
- card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
- card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
- card->cid.hwrev = UNSTUFF_BITS(resp, 60, 4);
- card->cid.fwrev = UNSTUFF_BITS(resp, 56, 4);
- card->cid.serial = UNSTUFF_BITS(resp, 24, 32);
- card->cid.year = UNSTUFF_BITS(resp, 12, 8);
- card->cid.month = UNSTUFF_BITS(resp, 8, 4);
+ card->cid.manfid = unstuff_bits(resp, 120, 8);
+ card->cid.oemid = unstuff_bits(resp, 104, 16);
+ card->cid.prod_name[0] = unstuff_bits(resp, 96, 8);
+ card->cid.prod_name[1] = unstuff_bits(resp, 88, 8);
+ card->cid.prod_name[2] = unstuff_bits(resp, 80, 8);
+ card->cid.prod_name[3] = unstuff_bits(resp, 72, 8);
+ card->cid.prod_name[4] = unstuff_bits(resp, 64, 8);
+ card->cid.hwrev = unstuff_bits(resp, 60, 4);
+ card->cid.fwrev = unstuff_bits(resp, 56, 4);
+ card->cid.serial = unstuff_bits(resp, 24, 32);
+ card->cid.year = unstuff_bits(resp, 12, 8);
+ card->cid.month = unstuff_bits(resp, 8, 4);
card->cid.year += 2000; /* SD cards year offset */
+
+ /* some product names may include trailing whitespace */
+ strim(card->cid.prod_name);
}
/*
* Given a 128-bit response, decode to our card CSD structure.
*/
-static int mmc_decode_csd(struct mmc_card *card)
+static int mmc_decode_csd(struct mmc_card *card, bool is_sduc)
{
struct mmc_csd *csd = &card->csd;
unsigned int e, m, csd_struct;
u32 *resp = card->raw_csd;
- csd_struct = UNSTUFF_BITS(resp, 126, 2);
+ csd_struct = unstuff_bits(resp, 126, 2);
switch (csd_struct) {
case 0:
- m = UNSTUFF_BITS(resp, 115, 4);
- e = UNSTUFF_BITS(resp, 112, 3);
+ m = unstuff_bits(resp, 115, 4);
+ e = unstuff_bits(resp, 112, 3);
csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10;
- csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
+ csd->taac_clks = unstuff_bits(resp, 104, 8) * 100;
- m = UNSTUFF_BITS(resp, 99, 4);
- e = UNSTUFF_BITS(resp, 96, 3);
+ m = unstuff_bits(resp, 99, 4);
+ e = unstuff_bits(resp, 96, 3);
csd->max_dtr = tran_exp[e] * tran_mant[m];
- csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
+ csd->cmdclass = unstuff_bits(resp, 84, 12);
- e = UNSTUFF_BITS(resp, 47, 3);
- m = UNSTUFF_BITS(resp, 62, 12);
+ e = unstuff_bits(resp, 47, 3);
+ m = unstuff_bits(resp, 62, 12);
csd->capacity = (1 + m) << (e + 2);
- csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
- csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
- csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
- csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
- csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
- csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
- csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
- csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
+ csd->read_blkbits = unstuff_bits(resp, 80, 4);
+ csd->read_partial = unstuff_bits(resp, 79, 1);
+ csd->write_misalign = unstuff_bits(resp, 78, 1);
+ csd->read_misalign = unstuff_bits(resp, 77, 1);
+ csd->dsr_imp = unstuff_bits(resp, 76, 1);
+ csd->r2w_factor = unstuff_bits(resp, 26, 3);
+ csd->write_blkbits = unstuff_bits(resp, 22, 4);
+ csd->write_partial = unstuff_bits(resp, 21, 1);
- if (UNSTUFF_BITS(resp, 46, 1)) {
+ if (unstuff_bits(resp, 46, 1)) {
csd->erase_size = 1;
} else if (csd->write_blkbits >= 9) {
- csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
+ csd->erase_size = unstuff_bits(resp, 39, 7) + 1;
csd->erase_size <<= csd->write_blkbits - 9;
}
- if (UNSTUFF_BITS(resp, 13, 1))
+ if (unstuff_bits(resp, 13, 1))
mmc_card_set_readonly(card);
break;
case 1:
+ case 2:
/*
- * This is a block-addressed SDHC or SDXC card. Most
- * interesting fields are unused and have fixed
+ * This is a block-addressed SDHC, SDXC or SDUC card.
+ * Most interesting fields are unused and have fixed
* values. To avoid getting tripped by buggy cards,
* we assume those fixed values ourselves.
*/
@@ -159,18 +160,23 @@ static int mmc_decode_csd(struct mmc_card *card)
csd->taac_ns = 0; /* Unused */
csd->taac_clks = 0; /* Unused */
- m = UNSTUFF_BITS(resp, 99, 4);
- e = UNSTUFF_BITS(resp, 96, 3);
+ m = unstuff_bits(resp, 99, 4);
+ e = unstuff_bits(resp, 96, 3);
csd->max_dtr = tran_exp[e] * tran_mant[m];
- csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
- csd->c_size = UNSTUFF_BITS(resp, 48, 22);
+ csd->cmdclass = unstuff_bits(resp, 84, 12);
+
+ if (csd_struct == 1)
+ m = unstuff_bits(resp, 48, 22);
+ else
+ m = unstuff_bits(resp, 48, 28);
+ csd->c_size = m;
- /* SDXC cards have a minimum C_SIZE of 0x00FFFF */
- if (csd->c_size >= 0xFFFF)
+ if (csd->c_size >= 0x400000 && is_sduc)
+ mmc_card_set_ult_capacity(card);
+ else if (csd->c_size >= 0xFFFF)
mmc_card_set_ext_capacity(card);
- m = UNSTUFF_BITS(resp, 48, 22);
- csd->capacity = (1 + m) << 10;
+ csd->capacity = (1 + (typeof(sector_t))m) << 10;
csd->read_blkbits = 9;
csd->read_partial = 0;
@@ -181,7 +187,7 @@ static int mmc_decode_csd(struct mmc_card *card)
csd->write_partial = 0;
csd->erase_size = 1;
- if (UNSTUFF_BITS(resp, 13, 1))
+ if (unstuff_bits(resp, 13, 1))
mmc_card_set_readonly(card);
break;
default:
@@ -198,7 +204,7 @@ static int mmc_decode_csd(struct mmc_card *card)
/*
* Given a 64-bit response, decode to our card SCR structure.
*/
-static int mmc_decode_scr(struct mmc_card *card)
+int mmc_decode_scr(struct mmc_card *card)
{
struct sd_scr *scr = &card->scr;
unsigned int scr_struct;
@@ -207,33 +213,33 @@ static int mmc_decode_scr(struct mmc_card *card)
resp[3] = card->raw_scr[1];
resp[2] = card->raw_scr[0];
- scr_struct = UNSTUFF_BITS(resp, 60, 4);
+ scr_struct = unstuff_bits(resp, 60, 4);
if (scr_struct != 0) {
pr_err("%s: unrecognised SCR structure version %d\n",
mmc_hostname(card->host), scr_struct);
return -EINVAL;
}
- scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4);
- scr->bus_widths = UNSTUFF_BITS(resp, 48, 4);
+ scr->sda_vsn = unstuff_bits(resp, 56, 4);
+ scr->bus_widths = unstuff_bits(resp, 48, 4);
if (scr->sda_vsn == SCR_SPEC_VER_2)
/* Check if Physical Layer Spec v3.0 is supported */
- scr->sda_spec3 = UNSTUFF_BITS(resp, 47, 1);
+ scr->sda_spec3 = unstuff_bits(resp, 47, 1);
if (scr->sda_spec3) {
- scr->sda_spec4 = UNSTUFF_BITS(resp, 42, 1);
- scr->sda_specx = UNSTUFF_BITS(resp, 38, 4);
+ scr->sda_spec4 = unstuff_bits(resp, 42, 1);
+ scr->sda_specx = unstuff_bits(resp, 38, 4);
}
- if (UNSTUFF_BITS(resp, 55, 1))
+ if (unstuff_bits(resp, 55, 1))
card->erased_byte = 0xFF;
else
card->erased_byte = 0x0;
if (scr->sda_spec4)
- scr->cmds = UNSTUFF_BITS(resp, 32, 4);
+ scr->cmds = unstuff_bits(resp, 32, 4);
else if (scr->sda_spec3)
- scr->cmds = UNSTUFF_BITS(resp, 32, 2);
+ scr->cmds = unstuff_bits(resp, 32, 2);
/* SD Spec says: any SD Card shall set at least bits 0 and 2 */
if (!(scr->bus_widths & SD_SCR_BUS_WIDTH_1) ||
@@ -279,17 +285,17 @@ static int mmc_read_ssr(struct mmc_card *card)
kfree(raw_ssr);
/*
- * UNSTUFF_BITS only works with four u32s so we have to offset the
+ * unstuff_bits only works with four u32s so we have to offset the
* bitfield positions accordingly.
*/
- au = UNSTUFF_BITS(card->raw_ssr, 428 - 384, 4);
+ au = unstuff_bits(card->raw_ssr, 428 - 384, 4);
if (au) {
if (au <= 9 || card->scr.sda_spec3) {
card->ssr.au = sd_au_size[au];
- es = UNSTUFF_BITS(card->raw_ssr, 408 - 384, 16);
- et = UNSTUFF_BITS(card->raw_ssr, 402 - 384, 6);
+ es = unstuff_bits(card->raw_ssr, 408 - 384, 16);
+ et = unstuff_bits(card->raw_ssr, 402 - 384, 6);
if (es && et) {
- eo = UNSTUFF_BITS(card->raw_ssr, 400 - 384, 2);
+ eo = unstuff_bits(card->raw_ssr, 400 - 384, 2);
card->ssr.erase_timeout = (et * 1000) / es;
card->ssr.erase_offset = eo * 1000;
}
@@ -303,7 +309,7 @@ static int mmc_read_ssr(struct mmc_card *card)
* starting SD5.1 discard is supported if DISCARD_SUPPORT (b313) is set
*/
resp[3] = card->raw_ssr[6];
- discard_support = UNSTUFF_BITS(resp, 313 - 288, 1);
+ discard_support = unstuff_bits(resp, 313 - 288, 1);
card->erase_arg = (card->scr.sda_specx && discard_support) ?
SD_DISCARD_ARG : SD_ERASE_ARG;
@@ -336,7 +342,7 @@ static int mmc_read_switch(struct mmc_card *card)
* The argument does not matter, as the support bits do not
* change with the arguments.
*/
- err = mmc_sd_switch(card, 0, 0, 0, status);
+ err = mmc_sd_switch(card, SD_SWITCH_CHECK, 0, 0, status);
if (err) {
/*
* If the host or the card can't do the switch,
@@ -353,7 +359,7 @@ static int mmc_read_switch(struct mmc_card *card)
}
if (status[13] & SD_MODE_HIGH_SPEED)
- card->sw_caps.hs_max_dtr = HIGH_SPEED_MAX_DTR;
+ card->sw_caps.hs_max_dtr = card->host->max_sd_hs_hz ?: HIGH_SPEED_MAX_DTR;
if (card->scr.sda_spec3) {
card->sw_caps.sd3_bus_mode = status[13];
@@ -392,7 +398,8 @@ int mmc_sd_switch_hs(struct mmc_card *card)
if (!status)
return -ENOMEM;
- err = mmc_sd_switch(card, 1, 0, HIGH_SPEED_BUS_SPEED, status);
+ err = mmc_sd_switch(card, SD_SWITCH_SET, 0,
+ HIGH_SPEED_BUS_SPEED, status);
if (err)
goto out;
@@ -424,7 +431,8 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
card_drv_type, &drv_type);
if (drive_strength) {
- err = mmc_sd_switch(card, 1, 2, drive_strength, status);
+ err = mmc_sd_switch(card, SD_SWITCH_SET, 2,
+ drive_strength, status);
if (err)
return err;
if ((status[15] & 0xF) != drive_strength) {
@@ -447,7 +455,7 @@ static void sd_update_bus_speed_mode(struct mmc_card *card)
* If the host doesn't support any of the UHS-I modes, fallback on
* default speed.
*/
- if (!mmc_host_uhs(card->host)) {
+ if (!mmc_host_can_uhs(card->host)) {
card->sd_bus_speed = 0;
return;
}
@@ -504,7 +512,7 @@ static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
return 0;
}
- err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
+ err = mmc_sd_switch(card, SD_SWITCH_SET, 0, card->sd_bus_speed, status);
if (err)
return err;
@@ -546,7 +554,7 @@ static u32 sd_get_host_max_current(struct mmc_host *host)
static int sd_set_current_limit(struct mmc_card *card, u8 *status)
{
- int current_limit = SD_SET_CURRENT_NO_CHANGE;
+ int current_limit = SD_SET_CURRENT_LIMIT_200;
int err;
u32 max_current;
@@ -590,12 +598,10 @@ static int sd_set_current_limit(struct mmc_card *card, u8 *status)
else if (max_current >= 400 &&
card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_400)
current_limit = SD_SET_CURRENT_LIMIT_400;
- else if (max_current >= 200 &&
- card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_200)
- current_limit = SD_SET_CURRENT_LIMIT_200;
- if (current_limit != SD_SET_CURRENT_NO_CHANGE) {
- err = mmc_sd_switch(card, 1, 3, current_limit, status);
+ if (current_limit != SD_SET_CURRENT_LIMIT_200) {
+ err = mmc_sd_switch(card, SD_SWITCH_SET, 3,
+ current_limit, status);
if (err)
return err;
@@ -609,6 +615,29 @@ static int sd_set_current_limit(struct mmc_card *card, u8 *status)
}
/*
+ * Determine if the card should tune or not.
+ */
+static bool mmc_sd_use_tuning(struct mmc_card *card)
+{
+ /*
+ * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
+ * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
+ */
+ if (mmc_host_is_spi(card->host))
+ return false;
+
+ switch (card->host->ios.timing) {
+ case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_SDR104:
+ return true;
+ case MMC_TIMING_UHS_DDR50:
+ return !mmc_card_no_uhs_ddr50_tuning(card);
+ }
+
+ return false;
+}
+
+/*
* UHS-I specific initialization procedure
*/
static int mmc_sd_init_uhs_card(struct mmc_card *card)
@@ -651,14 +680,7 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
if (err)
goto out;
- /*
- * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
- * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
- */
- if (!mmc_host_is_spi(card->host) &&
- (card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
- card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
- card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
+ if (mmc_sd_use_tuning(card)) {
err = mmc_execute_tuning(card);
/*
@@ -707,21 +729,19 @@ MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
-static ssize_t mmc_dsr_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t mmc_dsr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct mmc_card *card = mmc_dev_to_card(dev);
- struct mmc_host *host = card->host;
-
- if (card->csd.dsr_imp && host->dsr_req)
- return sprintf(buf, "0x%x\n", host->dsr);
- else
- /* return default DSR value */
- return sprintf(buf, "0x%x\n", 0x404);
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+
+ if (card->csd.dsr_imp && host->dsr_req)
+ return sysfs_emit(buf, "0x%x\n", host->dsr);
+ /* return default DSR value */
+ return sysfs_emit(buf, "0x%x\n", 0x404);
}
-static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
+static DEVICE_ATTR(dsr, 0444, mmc_dsr_show, NULL);
MMC_DEV_ATTR(vendor, "0x%04x\n", card->cis.vendor);
MMC_DEV_ATTR(device, "0x%04x\n", card->cis.device);
@@ -734,9 +754,9 @@ static ssize_t info##num##_show(struct device *dev, struct device_attribute *att
\
if (num > card->num_info) \
return -ENODATA; \
- if (!card->info[num-1][0]) \
+ if (!card->info[num - 1][0]) \
return 0; \
- return sprintf(buf, "%s\n", card->info[num-1]); \
+ return sysfs_emit(buf, "%s\n", card->info[num - 1]); \
} \
static DEVICE_ATTR_RO(info##num)
@@ -786,7 +806,7 @@ static umode_t sd_std_is_visible(struct kobject *kobj, struct attribute *attr,
attr == &dev_attr_info2.attr ||
attr == &dev_attr_info3.attr ||
attr == &dev_attr_info4.attr
- ) && card->type != MMC_TYPE_SD_COMBO)
+ ) && !mmc_card_sd_combo(card))
return 0;
return attr->mode;
@@ -798,7 +818,7 @@ static const struct attribute_group sd_std_group = {
};
__ATTRIBUTE_GROUPS(sd_std);
-struct device_type sd_type = {
+const struct device_type sd_type = {
.groups = sd_std_groups,
};
@@ -833,15 +853,18 @@ try_again:
* block-addressed SDHC cards.
*/
err = mmc_send_if_cond(host, ocr);
- if (!err)
+ if (!err) {
ocr |= SD_OCR_CCS;
+ /* Set HO2T as well - SDUC card won't respond otherwise */
+ ocr |= SD_OCR_2T;
+ }
/*
* If the host supports one of UHS-I modes, request the card
* to switch to 1.8V signaling level. If the card has failed
* repeatedly to switch however, skip this.
*/
- if (retries && mmc_host_uhs(host))
+ if (retries && mmc_host_can_uhs(host))
ocr |= SD_OCR_S18R;
/*
@@ -863,7 +886,8 @@ try_again:
* the CCS bit is set as well. We deliberately deviate from the spec in
* regards to this, which allows UHS-I to be supported for SDSC cards.
*/
- if (!mmc_host_is_spi(host) && rocr && (*rocr & 0x01000000)) {
+ if (!mmc_host_is_spi(host) && (ocr & SD_OCR_S18R) &&
+ rocr && (*rocr & SD_ROCR_S18A)) {
err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
@@ -878,7 +902,7 @@ try_again:
return err;
}
-int mmc_sd_get_csd(struct mmc_card *card)
+int mmc_sd_get_csd(struct mmc_card *card, bool is_sduc)
{
int err;
@@ -889,14 +913,14 @@ int mmc_sd_get_csd(struct mmc_card *card)
if (err)
return err;
- err = mmc_decode_csd(card);
+ err = mmc_decode_csd(card, is_sduc);
if (err)
return err;
return 0;
}
-static int mmc_sd_get_ro(struct mmc_host *host)
+int mmc_sd_get_ro(struct mmc_host *host)
{
int ro;
@@ -942,16 +966,17 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
/* Erase init depends on CSD and SSR */
mmc_init_erase(card);
-
- /*
- * Fetch switch information from card.
- */
- err = mmc_read_switch(card);
- if (err)
- return err;
}
/*
+ * Fetch switch information from card. Note, sd3_bus_mode can change if
+ * voltage switch outcome changes, so do this always.
+ */
+ err = mmc_read_switch(card);
+ if (err)
+ return err;
+
+ /*
* For SPI, enable CRC as appropriate.
* This CRC enable is located AFTER the reading of the
* card registers because some SDHC cards are not able
@@ -1108,7 +1133,7 @@ static int sd_parse_ext_reg_power(struct mmc_card *card, u8 fno, u8 page,
card->ext_power.rev = reg_buf[0] & 0xf;
/* Power Off Notification support at bit 4. */
- if (reg_buf[1] & BIT(4))
+ if ((reg_buf[1] & BIT(4)) && !mmc_card_broken_sd_poweroff_notify(card))
card->ext_power.feature_support |= SD_EXT_POWER_OFF_NOTIFY;
/* Power Sustenance support at bit 5. */
@@ -1161,7 +1186,7 @@ static int sd_parse_ext_reg_perf(struct mmc_card *card, u8 fno, u8 page,
card->ext_perf.feature_support |= SD_EXT_PERF_HOST_MAINT;
/* Cache support at bit 0. */
- if (reg_buf[4] & BIT(0))
+ if ((reg_buf[4] & BIT(0)) && !mmc_card_broken_sd_cache(card))
card->ext_perf.feature_support |= SD_EXT_PERF_CACHE;
/* Command queue support indicated via queue depth bits (0 to 4). */
@@ -1250,7 +1275,7 @@ static int sd_read_ext_regs(struct mmc_card *card)
*/
err = sd_read_ext_reg(card, 0, 0, 0, 512, gen_info_buf);
if (err) {
- pr_warn("%s: error %d reading general info of SD ext reg\n",
+ pr_err("%s: error %d reading general info of SD ext reg\n",
mmc_hostname(card->host), err);
goto out;
}
@@ -1264,7 +1289,12 @@ static int sd_read_ext_regs(struct mmc_card *card)
 /* Number of extensions to be found. */
num_ext = gen_info_buf[4];
- /* We support revision 0, but limit it to 512 bytes for simplicity. */
+ /*
+ * We only support revision 0 and limit it to 512 bytes for simplicity.
+ * No matter what, let's return zero to allow us to continue using the
+ * card, even if we can't support the features from the SD function
+ * extensions registers.
+ */
if (rev != 0 || len > 512) {
pr_warn("%s: non-supported SD ext reg layout\n",
mmc_hostname(card->host));
@@ -1279,7 +1309,7 @@ static int sd_read_ext_regs(struct mmc_card *card)
for (i = 0; i < num_ext; i++) {
err = sd_parse_ext_reg(card, gen_info_buf, &next_ext_addr);
if (err) {
- pr_warn("%s: error %d parsing SD ext reg\n",
+ pr_err("%s: error %d parsing SD ext reg\n",
mmc_hostname(card->host), err);
goto out;
}
@@ -1438,7 +1468,10 @@ retry:
}
if (!oldcard) {
- err = mmc_sd_get_csd(card);
+ u32 sduc_arg = SD_OCR_CCS | SD_OCR_2T;
+ bool is_sduc = (rocr & sduc_arg) == sduc_arg;
+
+ err = mmc_sd_get_csd(card, is_sduc);
if (err)
goto free_card;
@@ -1461,6 +1494,9 @@ retry:
goto free_card;
}
+ /* Apply quirks prior to card setup */
+ mmc_fixup_device(card, mmc_sd_fixups);
+
err = mmc_sd_setup_card(host, card, oldcard != NULL);
if (err)
goto free_card;
@@ -1470,33 +1506,22 @@ retry:
* signaling. Detect that situation and try to initialize a UHS-I (1.8V)
* transfer mode.
*/
- if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) &&
+ if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_can_uhs(host) &&
mmc_sd_card_using_v18(card) &&
host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
- /*
- * Re-read switch information in case it has changed since
- * oldcard was initialized.
- */
- if (oldcard) {
- err = mmc_read_switch(card);
- if (err)
- goto free_card;
- }
- if (mmc_sd_card_using_v18(card)) {
- if (mmc_host_set_uhs_voltage(host) ||
- mmc_sd_init_uhs_card(card)) {
- v18_fixup_failed = true;
- mmc_power_cycle(host, ocr);
- if (!oldcard)
- mmc_remove_card(card);
- goto retry;
- }
- goto done;
+ if (mmc_host_set_uhs_voltage(host) ||
+ mmc_sd_init_uhs_card(card)) {
+ v18_fixup_failed = true;
+ mmc_power_cycle(host, ocr);
+ if (!oldcard)
+ mmc_remove_card(card);
+ goto retry;
}
+ goto cont;
}
/* Initialization sequence for UHS-I cards */
- if (rocr & SD_ROCR_S18A && mmc_host_uhs(host)) {
+ if (rocr & SD_ROCR_S18A && mmc_host_can_uhs(host)) {
err = mmc_sd_init_uhs_card(card);
if (err)
goto free_card;
@@ -1515,6 +1540,13 @@ retry:
*/
mmc_set_clock(host, mmc_sd_get_max_clock(card));
+ if (host->ios.timing == MMC_TIMING_SD_HS &&
+ host->ops->prepare_sd_hs_tuning) {
+ err = host->ops->prepare_sd_hs_tuning(host, card);
+ if (err)
+ goto free_card;
+ }
+
/*
* Switch to wider bus (if supported).
*/
@@ -1526,8 +1558,15 @@ retry:
mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
}
- }
+ if (host->ios.timing == MMC_TIMING_SD_HS &&
+ host->ops->execute_sd_hs_tuning) {
+ err = host->ops->execute_sd_hs_tuning(host, card);
+ if (err)
+ goto free_card;
+ }
+ }
+cont:
if (!oldcard) {
/* Read/parse the extension registers. */
err = sd_read_ext_regs(card);
@@ -1542,7 +1581,7 @@ retry:
goto free_card;
}
- if (host->cqe_ops && !host->cqe_enabled) {
+ if (!mmc_card_ult_capacity(card) && host->cqe_ops && !host->cqe_enabled) {
err = host->cqe_ops->cqe_enable(host, card);
if (!err) {
host->cqe_enabled = true;
@@ -1559,7 +1598,7 @@ retry:
err = -EINVAL;
goto free_card;
}
-done:
+
host->card = card;
return 0;
@@ -1571,15 +1610,6 @@ free_card:
}
/*
- * Host is being removed. Free up the current card.
- */
-static void mmc_sd_remove(struct mmc_host *host)
-{
- mmc_remove_card(host->card);
- host->card = NULL;
-}
-
-/*
* Card detection - card is alive.
*/
static int mmc_sd_alive(struct mmc_host *host)
@@ -1604,7 +1634,8 @@ static void mmc_sd_detect(struct mmc_host *host)
mmc_put_card(host->card, NULL);
if (err) {
- mmc_sd_remove(host);
+ mmc_remove_card(host->card);
+ host->card = NULL;
mmc_claim_host(host);
mmc_detach_bus(host);
@@ -1663,9 +1694,15 @@ static int sd_poweroff_notify(struct mmc_card *card)
goto out;
}
+ /* Find out when the command is completed. */
+ err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false,
+ MMC_BUSY_EXTR_SINGLE);
+ if (err)
+ goto out;
+
cb_data.card = card;
cb_data.reg_buf = reg_buf;
- err = __mmc_poll_for_busy(card, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
+ err = __mmc_poll_for_busy(card->host, 0, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
&sd_busy_poweroff_notify_cb, &cb_data);
out:
@@ -1699,6 +1736,19 @@ out:
}
/*
+ * Host is being removed. Free up the current card and do a graceful power-off.
+ */
+static void mmc_sd_remove(struct mmc_host *host)
+{
+ get_device(&host->card->dev);
+ mmc_remove_card(host->card);
+
+ _mmc_sd_suspend(host);
+
+ put_device(&host->card->dev);
+ host->card = NULL;
+}
+/*
* Callback for suspend
*/
static int mmc_sd_suspend(struct mmc_host *host)
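
Editorial note on the SDUC threshold used in mmc_decode_csd() above: capacity is (C_SIZE + 1) << 10 sectors of 512 bytes, so a C_SIZE of 0x400000 addresses just over 2 TiB, which is where a card must be treated as Ultra Capacity rather than Extended Capacity. A small sketch of that arithmetic:

	/* Bytes addressed by a CSD v2/v3 C_SIZE value (editorial sketch). */
	static u64 example_csd_capacity_bytes(u32 c_size)
	{
		u64 sectors = ((u64)c_size + 1) << 10;	/* 512-byte sectors */

		return sectors << 9;	/* 0x400000 -> a bit over 2 TiB */
	}
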
diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h
index 1af5a038bae9..301dc34b8b63 100644
--- a/drivers/mmc/core/sd.h
+++ b/drivers/mmc/core/sd.h
@@ -4,13 +4,15 @@
#include <linux/types.h>
-extern struct device_type sd_type;
+extern const struct device_type sd_type;
struct mmc_host;
struct mmc_card;
int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr);
-int mmc_sd_get_csd(struct mmc_card *card);
+int mmc_sd_get_csd(struct mmc_card *card, bool is_sduc);
+int mmc_decode_scr(struct mmc_card *card);
+int mmc_sd_get_ro(struct mmc_host *host);
void mmc_decode_cid(struct mmc_card *card);
int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
bool reinit);
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index ef8d1dce5af1..cd86463dd306 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -16,9 +16,24 @@
#include <linux/mmc/sd.h>
#include "core.h"
+#include "card.h"
#include "sd_ops.h"
#include "mmc_ops.h"
+/*
+ * Extensive testing has shown that some specific SD cards
+ * require an increased command timeout to be successfully
+ * initialized.
+ */
+#define SD_APP_OP_COND_PERIOD_US (10 * 1000) /* 10ms */
+#define SD_APP_OP_COND_TIMEOUT_MS 2000 /* 2s */
+
+struct sd_app_op_cond_busy_data {
+ struct mmc_host *host;
+ u32 ocr;
+ struct mmc_command *cmd;
+};
+
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
{
int err;
@@ -27,6 +42,15 @@ int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
if (WARN_ON(card && card->host != host))
return -EINVAL;
+ /*
+ * UHS2 packet has APP bit so only set APP_CMD flag here.
+ * Will set the APP bit when assembling UHS2 packet.
+ */
+ if (host->uhs2_sd_tran) {
+ host->uhs2_app_cmd = true;
+ return 0;
+ }
+
cmd.opcode = MMC_APP_CMD;
if (card) {
@@ -115,10 +139,45 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)
return mmc_wait_for_app_cmd(card->host, card, &cmd);
}
+static int sd_app_op_cond_cb(void *cb_data, bool *busy)
+{
+ struct sd_app_op_cond_busy_data *data = cb_data;
+ struct mmc_host *host = data->host;
+ struct mmc_command *cmd = data->cmd;
+ u32 ocr = data->ocr;
+ int err;
+
+ *busy = false;
+
+ err = mmc_wait_for_app_cmd(host, NULL, cmd);
+ if (err)
+ return err;
+
+ /* If we're just probing, do a single pass. */
+ if (ocr == 0)
+ return 0;
+
+ /* Wait until reset completes. */
+ if (mmc_host_is_spi(host)) {
+ if (!(cmd->resp[0] & R1_SPI_IDLE))
+ return 0;
+ } else if (cmd->resp[0] & MMC_CARD_BUSY) {
+ return 0;
+ }
+
+ *busy = true;
+ return 0;
+}
+
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
struct mmc_command cmd = {};
- int i, err = 0;
+ struct sd_app_op_cond_busy_data cb_data = {
+ .host = host,
+ .ocr = ocr,
+ .cmd = &cmd
+ };
+ int err;
cmd.opcode = SD_APP_OP_COND;
if (mmc_host_is_spi(host))
@@ -127,36 +186,30 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
cmd.arg = ocr;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
- for (i = 100; i; i--) {
- err = mmc_wait_for_app_cmd(host, NULL, &cmd);
- if (err)
- break;
-
- /* if we're just probing, do a single pass */
- if (ocr == 0)
- break;
-
- /* otherwise wait until reset completes */
- if (mmc_host_is_spi(host)) {
- if (!(cmd.resp[0] & R1_SPI_IDLE))
- break;
- } else {
- if (cmd.resp[0] & MMC_CARD_BUSY)
- break;
- }
+ err = __mmc_poll_for_busy(host, SD_APP_OP_COND_PERIOD_US,
+ SD_APP_OP_COND_TIMEOUT_MS, &sd_app_op_cond_cb,
+ &cb_data);
+ if (err)
+ return err;
- err = -ETIMEDOUT;
+ if (rocr && !mmc_host_is_spi(host))
+ *rocr = cmd.resp[0];
- mmc_delay(10);
- }
+ return 0;
+}
- if (!i)
- pr_err("%s: card never left busy state\n", mmc_hostname(host));
+int mmc_send_ext_addr(struct mmc_host *host, u32 addr)
+{
+ struct mmc_command cmd = {
+ .opcode = SD_ADDR_EXT,
+ .arg = addr,
+ .flags = MMC_RSP_R1 | MMC_CMD_AC,
+ };
- if (rocr && !mmc_host_is_spi(host))
- *rocr = cmd.resp[0];
+ if (!mmc_card_ult_capacity(host->card))
+ return 0;
- return err;
+ return mmc_wait_for_cmd(host, &cmd, 0);
}
static int __mmc_send_if_cond(struct mmc_host *host, u32 ocr, u8 pcie_bits,
@@ -307,14 +360,13 @@ int mmc_app_send_scr(struct mmc_card *card)
return 0;
}
-int mmc_sd_switch(struct mmc_card *card, int mode, int group,
+int mmc_sd_switch(struct mmc_card *card, bool mode, int group,
u8 value, u8 *resp)
{
u32 cmd_args;
/* NOTE: caller guarantees resp is heap-allocated */
- mode = !!mode;
value &= 0xF;
cmd_args = mode << 31 | 0x00FFFFFF;
cmd_args &= ~(0xF << (group * 4));
@@ -323,6 +375,7 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
return mmc_send_adtc_data(card, card->host, SD_SWITCH, cmd_args, resp,
64);
}
+EXPORT_SYMBOL_GPL(mmc_sd_switch);
int mmc_app_sd_status(struct mmc_card *card, void *ssr)
{
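
Worked example of the SD_SWITCH argument packing performed by mmc_sd_switch() above (editorial): bit 31 carries the mode, and each function group owns one nibble where 0xF means "no change". Selecting HIGH_SPEED_BUS_SPEED (value 1) in group 0 in set mode therefore produces 0x80FFFFF1:

	/* Reproduces the packing done by mmc_sd_switch(), for illustration. */
	static u32 example_sd_switch_arg(bool set, int group, u8 value)
	{
		u32 arg = (u32)set << 31 | 0x00FFFFFF;	/* all groups: no change */

		arg &= ~(0xFU << (group * 4));		/* clear this group's nibble */
		arg |= (value & 0xF) << (group * 4);	/* pick the new function */
		return arg;	/* example_sd_switch_arg(true, 0, 1) == 0x80FFFFF1 */
	}
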
diff --git a/drivers/mmc/core/sd_ops.h b/drivers/mmc/core/sd_ops.h
index 3ba7b3cf4652..8fffc1b29757 100644
--- a/drivers/mmc/core/sd_ops.h
+++ b/drivers/mmc/core/sd_ops.h
@@ -12,6 +12,7 @@
struct mmc_card;
struct mmc_host;
+struct mmc_request;
int mmc_app_set_bus_width(struct mmc_card *card, int width);
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
@@ -19,10 +20,10 @@ int mmc_send_if_cond(struct mmc_host *host, u32 ocr);
int mmc_send_if_cond_pcie(struct mmc_host *host, u32 ocr);
int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca);
int mmc_app_send_scr(struct mmc_card *card);
-int mmc_sd_switch(struct mmc_card *card, int mode, int group,
- u8 value, u8 *resp);
int mmc_app_sd_status(struct mmc_card *card, void *ssr);
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card);
+int mmc_send_ext_addr(struct mmc_host *host, u32 addr);
+void mmc_uhs2_prepare_cmd(struct mmc_host *host, struct mmc_request *mrq);
#endif
diff --git a/drivers/mmc/core/sd_uhs2.c b/drivers/mmc/core/sd_uhs2.c
new file mode 100644
index 000000000000..de17d1611290
--- /dev/null
+++ b/drivers/mmc/core/sd_uhs2.c
@@ -0,0 +1,1304 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Linaro Ltd
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * Copyright (C) 2014 Intel Corp, All Rights Reserved.
+ * Author: Yi Sun <yi.y.sun@intel.com>
+ *
+ * Copyright (C) 2020 Genesys Logic, Inc.
+ * Authors: Ben Chuang <ben.chuang@genesyslogic.com.tw>
+ *
+ * Copyright (C) 2020 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * Copyright (C) 2022 Genesys Logic, Inc.
+ * Authors: Jason Lai <jason.lai@genesyslogic.com.tw>
+ *
+ * Copyright (C) 2023 Genesys Logic, Inc.
+ * Authors: Victor Shih <victor.shih@genesyslogic.com.tw>
+ *
+ * Support for SD UHS-II cards
+ */
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/sd_uhs2.h>
+
+#include "card.h"
+#include "core.h"
+#include "bus.h"
+#include "sd.h"
+#include "sd_ops.h"
+#include "mmc_ops.h"
+
+#define UHS2_WAIT_CFG_COMPLETE_PERIOD_US (1 * 1000)
+#define UHS2_WAIT_CFG_COMPLETE_TIMEOUT_MS 100
+
+static const unsigned int sd_uhs2_freqs[] = { 52000000, 26000000 };
+
+struct sd_uhs2_wait_active_state_data {
+ struct mmc_host *host;
+ struct mmc_command *cmd;
+};
+
+static int sd_uhs2_power_up(struct mmc_host *host)
+{
+ if (host->ios.power_mode == MMC_POWER_ON)
+ return 0;
+
+ host->ios.vdd = fls(host->ocr_avail) - 1;
+ host->ios.clock = host->f_init;
+ host->ios.timing = MMC_TIMING_UHS2_SPEED_A;
+ host->ios.power_mode = MMC_POWER_ON;
+
+ return host->ops->uhs2_control(host, UHS2_SET_IOS);
+}
+
+static int sd_uhs2_power_off(struct mmc_host *host)
+{
+ int err;
+
+ if (host->ios.power_mode == MMC_POWER_OFF)
+ return 0;
+
+ host->ios.vdd = 0;
+ host->ios.clock = 0;
+ host->ios.power_mode = MMC_POWER_OFF;
+ host->uhs2_sd_tran = false;
+
+ err = host->ops->uhs2_control(host, UHS2_SET_IOS);
+ if (err)
+ return err;
+
+ /* For consistency, let's restore the initial timing. */
+ host->ios.timing = MMC_TIMING_LEGACY;
+ return 0;
+}
+
+/*
+ * Run the phy initialization sequence, which mainly relies on the UHS-II host
+ * to check that we reach the expected electrical state between the host and
+ * the card.
+ */
+static int sd_uhs2_phy_init(struct mmc_host *host)
+{
+ int err;
+
+ err = host->ops->uhs2_control(host, UHS2_PHY_INIT);
+ if (err) {
+ pr_debug("%s: failed to initialize phy for UHS-II!\n",
+ mmc_hostname(host));
+ }
+
+ return err;
+}
+
+/*
+ * sd_uhs2_cmd_assemble() - build up UHS-II command packet which is embedded in
+ * mmc_command structure
+ * @cmd: MMC command to be executed
+ * @uhs2_cmd: UHS2 command corresponded to MMC command
+ * @header: Header field of UHS-II command cxpacket
+ * @arg: Argument field of UHS-II command packet
+ * @payload: Payload field of UHS-II command packet
+ * @plen: Payload length
+ * @resp: Response buffer is allocated by caller and it is used to keep
+ * the response of CM-TRAN command. For SD-TRAN command, uhs2_resp
+ * should be null and SD-TRAN command response should be stored in
+ * resp of mmc_command.
+ * @resp_len: Response buffer length
+ *
+ * The uhs2_command structure contains message packets which are transmited/
+ * received on UHS-II bus. This function fills in the contents of uhs2_command
+ * structure and embededs UHS2 command into mmc_command structure, which is used
+ * in legacy SD operation functions.
+ *
+ */
+static void sd_uhs2_cmd_assemble(struct mmc_command *cmd,
+ struct uhs2_command *uhs2_cmd,
+ u8 plen, u8 resp_len)
+{
+ uhs2_cmd->payload_len = plen * sizeof(u32);
+ uhs2_cmd->packet_len = uhs2_cmd->payload_len + 4;
+
+ cmd->uhs2_cmd = uhs2_cmd;
+ cmd->uhs2_cmd->uhs2_resp_len = resp_len;
+}
+
+/*
+ * Do the early initialization of the card, by sending the device init broadcast
+ * command and wait for the process to be completed.
+ */
+static int sd_uhs2_dev_init(struct mmc_host *host)
+{
+ struct mmc_command cmd = {0};
+ struct uhs2_command uhs2_cmd = {};
+ u32 cnt;
+ u32 dap, gap, resp_gap;
+ u32 payload0;
+ u8 gd = 0;
+ int err;
+
+ dap = host->uhs2_caps.dap;
+ gap = host->uhs2_caps.gap;
+
+ /*
+ * Refer to UHS-II Addendum Version 1.02 Figure 6-21 to see DEVICE_INIT CCMD format.
+ * Header:
+ * - Control Write(R/W=1) with 4-Byte payload(PLEN=01b).
+ * - IOADR = CMD_BASE + 002h
+ * Payload:
+ * - bit [3:0] : GAP(Group Allocated Power)
+ * - bit [7:4] : GD(Group Descriptor)
+ * - bit [11] : Complete Flag
+ * - bit [15:12]: DAP(Device Allocated Power)
+ */
+ uhs2_cmd.header = UHS2_NATIVE_PACKET | UHS2_PACKET_TYPE_CCMD;
+ uhs2_cmd.arg = ((UHS2_DEV_CMD_DEVICE_INIT & 0xFF) << 8) |
+ UHS2_NATIVE_CMD_WRITE |
+ UHS2_NATIVE_CMD_PLEN_4B |
+ (UHS2_DEV_CMD_DEVICE_INIT >> 8);
+
+ /*
+ * Refer to UHS-II Addendum Version 1.02 section 6.3.1.
+ * Max. time from DEVICE_INIT CCMD EOP reception on Device
+ * Rx to its SOP transmission on Device Tx(Tfwd_init_cmd) is
+ * 1 second.
+ */
+ cmd.busy_timeout = 1000;
+
+ /*
+ * Refer to UHS-II Addendum Version 1.02 section 6.2.6.3.
+ * Let's retry the DEVICE_INIT command no more than 30 times.
+ */
+ for (cnt = 0; cnt < 30; cnt++) {
+ payload0 = ((dap & 0xF) << 12) |
+ UHS2_DEV_INIT_COMPLETE_FLAG |
+ ((gd & 0xF) << 4) |
+ (gap & 0xF);
+ uhs2_cmd.payload[0] = (__force __be32)payload0;
+
+ sd_uhs2_cmd_assemble(&cmd, &uhs2_cmd, UHS2_DEV_INIT_PAYLOAD_LEN,
+ UHS2_DEV_INIT_RESP_LEN);
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+
+ if (err) {
+ pr_err("%s: %s: UHS2 CMD send fail, err= 0x%x!\n",
+ mmc_hostname(host), __func__, err);
+ continue;
+ }
+
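+ /* Verify that the response corresponds to the DEVICE_INIT CCMD. */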
+ if (uhs2_cmd.uhs2_resp[3] != (UHS2_DEV_CMD_DEVICE_INIT & 0xFF)) {
+ pr_err("%s: DEVICE_INIT response is wrong!\n",
+ mmc_hostname(host));
+ return -EIO;
+ }
+
+ if (uhs2_cmd.uhs2_resp[5] & 0x8) {
+ host->uhs2_caps.group_desc = gd;
+ return 0;
+ }
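+ /*
+ * Init has not completed yet: if the device echoed the requested GAP,
+ * retry with the next group descriptor.
+ */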
+ resp_gap = uhs2_cmd.uhs2_resp[4] & 0x0F;
+ if (gap == resp_gap)
+ gd++;
+ }
+
+ if (err) {
+ pr_err("%s: %s: UHS2 CMD send fail, err= 0x%x!\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ /* All retries were used without the device signalling init completion. */
+ pr_err("%s: %s: DEVICE_INIT did not complete after 30 attempts!\n",
+ mmc_hostname(host), __func__);
+ return -EIO;
+}
+
+/*
+ * Run the enumeration process by sending the enumerate command to the card.
+ * Note that we currently support only the point-to-point connection, which
+ * means only one card can be attached per host/slot.
+ */
+static int sd_uhs2_enum(struct mmc_host *host, u32 *node_id)
+{
+ struct mmc_command cmd = {0};
+ struct uhs2_command uhs2_cmd = {};
+ u32 payload0;
+ u8 id_f = 0xF, id_l = 0x0;
+ int err;
+
+ /*
+ * Refer to UHS-II Addendum Version 1.02 Figure 6-28 to see ENUMERATE CCMD format.
+ * Header:
+ * - Control Write(R/W=1) with 4-Byte payload(PLEN=01b).
+ * - IOADR = CMD_BASE + 003h
+ * Payload:
+ * - bit [3:0]: ID_L(Last Node ID)
+ * - bit [7:4]: ID_F(First Node ID)
+ */
+ uhs2_cmd.header = UHS2_NATIVE_PACKET | UHS2_PACKET_TYPE_CCMD;
+ uhs2_cmd.arg = ((UHS2_DEV_CMD_ENUMERATE & 0xFF) << 8) |
+ UHS2_NATIVE_CMD_WRITE |
+ UHS2_NATIVE_CMD_PLEN_4B |
+ (UHS2_DEV_CMD_ENUMERATE >> 8);
+
+ payload0 = (id_f << 4) | id_l;
+ uhs2_cmd.payload[0] = cpu_to_be32(payload0);
+
+ sd_uhs2_cmd_assemble(&cmd, &uhs2_cmd, UHS2_DEV_ENUM_PAYLOAD_LEN, UHS2_DEV_ENUM_RESP_LEN);
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err) {
+ pr_err("%s: %s: UHS2 CMD send fail, err= 0x%x!\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ if (uhs2_cmd.uhs2_resp[3] != (UHS2_DEV_CMD_ENUMERATE & 0xFF)) {
+ pr_err("%s: ENUMERATE response is wrong!\n",
+ mmc_hostname(host));
+ return -EIO;
+ }
+
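+ /*
+ * Only a point-to-point connection is supported, so the first assigned
+ * node ID identifies the one attached card.
+ */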
+ id_f = (uhs2_cmd.uhs2_resp[4] >> 4) & 0xF;
+ id_l = uhs2_cmd.uhs2_resp[4] & 0xF;
+ *node_id = id_f;
+
+ return 0;
+}
+
+/*
+ * Read the UHS-II configuration registers (CFG_REG) of the card, by sending it
+ * commands and by parsing the responses. Store a copy of the relevant data in
+ * card->uhs2_config.
+ */
+static int sd_uhs2_config_read(struct mmc_host *host, struct mmc_card *card)
+{
+ struct mmc_command cmd = {0};
+ struct uhs2_command uhs2_cmd = {};
+ u32 cap;
+ int err;
+
+ /*
+ * Use Control Read CCMD to read Generic Capability from Configuration Register.
+ * - Control Read(R/W=0) with 4-Byte payload(PLEN=01b).
+ * - IOADR = Generic Capability Register(CFG_BASE + 000h)
+ */
+ uhs2_cmd.header = UHS2_NATIVE_PACKET | UHS2_PACKET_TYPE_CCMD | card->uhs2_config.node_id;
+ uhs2_cmd.arg = ((UHS2_DEV_CONFIG_GEN_CAPS & 0xFF) << 8) |
+ UHS2_NATIVE_CMD_READ |
+ UHS2_NATIVE_CMD_PLEN_4B |
+ (UHS2_DEV_CONFIG_GEN_CAPS >> 8);
+
+ /*
+ * Per the spec, a read CCMD carries no payload field.
+ * PLEN is set in arg; for a read CCMD it represents the length of the
+ * read data, which is returned in the payload of the following RES
+ * (p136).
+ */
+ sd_uhs2_cmd_assemble(&cmd, &uhs2_cmd, 0, 0);
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err) {
+ pr_err("%s: %s: UHS2 CMD send fail, err= 0x%x!\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ /*
+ * Generic Capability Register:
+ * bit [7:0] : Reserved
+ * bit [13:8] : Device-Specific Number of Lanes and Functionality
+ * bit 8: 2L-HD
+ * bit 9: 2D-1U FD
+ * bit 10: 1D-2U FD
+ * bit 11: 2D-2U FD
+ * Others: Reserved
+ * bit [14] : DADR Length
+ * 0: 4 bytes
+ * 1: Reserved
+ * bit [23:16]: Application Type
+ * bit 16: 0=Non-SD memory, 1=SD memory
+ * bit 17: 0=Non-SDIO, 1=SDIO
+ * bit 18: 0=Card, 1=Embedded
+ * bit [63:24]: Reserved
+ */
+ cap = cmd.resp[0];
+ card->uhs2_config.n_lanes =
+ (cap >> UHS2_DEV_CONFIG_N_LANES_POS) &
+ UHS2_DEV_CONFIG_N_LANES_MASK;
+ card->uhs2_config.dadr_len =
+ (cap >> UHS2_DEV_CONFIG_DADR_POS) &
+ UHS2_DEV_CONFIG_DADR_MASK;
+ card->uhs2_config.app_type =
+ (cap >> UHS2_DEV_CONFIG_APP_POS) &
+ UHS2_DEV_CONFIG_APP_MASK;
+
+ /*
+ * Use Control Read CCMD to read PHY Capability from Configuration Register.
+ * - Control Read(R/W=0) with 8-Byte payload(PLEN=10b).
+ * - IOADR = PHY Capability Register(CFG_BASE + 002h)
+ */
+ uhs2_cmd.arg = ((UHS2_DEV_CONFIG_PHY_CAPS & 0xFF) << 8) |
+ UHS2_NATIVE_CMD_READ |
+ UHS2_NATIVE_CMD_PLEN_8B |
+ (UHS2_DEV_CONFIG_PHY_CAPS >> 8);
+
+ sd_uhs2_cmd_assemble(&cmd, &uhs2_cmd, 0, 0);
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err) {
+ pr_err("%s: %s: UHS2 CMD send fail, err= 0x%x!\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ /*
+ * PHY Capability Register:
+ * bit [3:0] : PHY Minor Revision
+ * bit [5:4] : PHY Major Revision
+ * bit [15] : Support Hibernate Mode
+ * 0: Not support Hibernate Mode
+ * 1: Support Hibernate Mode
+ * bit [31:16]: Reserved
+ * bit [35:32]: Device-Specific N_LSS_SYN
+ * bit [39:36]: Device-Specific N_LSS_DIR
+ * bit [63:40]: Reserved
+ */
+ cap = cmd.resp[0];
+ card->uhs2_config.phy_minor_rev =
+ cap & UHS2_DEV_CONFIG_PHY_MINOR_MASK;
+ card->uhs2_config.phy_major_rev =
+ (cap >> UHS2_DEV_CONFIG_PHY_MAJOR_POS) &
+ UHS2_DEV_CONFIG_PHY_MAJOR_MASK;
+ card->uhs2_config.can_hibernate =
+ (cap >> UHS2_DEV_CONFIG_CAN_HIBER_POS) &
+ UHS2_DEV_CONFIG_CAN_HIBER_MASK;
+
+ cap = cmd.resp[1];
+ card->uhs2_config.n_lss_sync =
+ cap & UHS2_DEV_CONFIG_N_LSS_SYN_MASK;
+ card->uhs2_config.n_lss_dir =
+ (cap >> UHS2_DEV_CONFIG_N_LSS_DIR_POS) &
+ UHS2_DEV_CONFIG_N_LSS_DIR_MASK;
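+ /*
+ * A register value of 0 encodes the maximum of 16; the values are then
+ * scaled by 4 (N_LSS_SYN) and 8 (N_LSS_DIR).
+ */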
+ if (card->uhs2_config.n_lss_sync == 0)
+ card->uhs2_config.n_lss_sync = 16 << 2;
+ else
+ card->uhs2_config.n_lss_sync <<= 2;
+
+ if (card->uhs2_config.n_lss_dir == 0)
+ card->uhs2_config.n_lss_dir = 16 << 3;
+ else
+ card->uhs2_config.n_lss_dir <<= 3;
+
+ /*
+ * Use Control Read CCMD to read LINK/TRAN Capability from Configuration Register.
+ * - Control Read(R/W=0) with 8-Byte payload(PLEN=10b).
+ * - IOADR = LINK/TRAN Capability Register(CFG_BASE + 004h)
+ */
+ uhs2_cmd.arg = ((UHS2_DEV_CONFIG_LINK_TRAN_CAPS & 0xFF) << 8) |
+ UHS2_NATIVE_CMD_READ |
+ UHS2_NATIVE_CMD_PLEN_8B |
+ (UHS2_DEV_CONFIG_LINK_TRAN_CAPS >> 8);
+
+ sd_uhs2_cmd_assemble(&cmd, &uhs2_cmd, 0, 0);
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err) {
+ pr_err("%s: %s: UHS2 CMD send fail, err= 0x%x!\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ /*
+ * LINK/TRAN Capability Register:
+ * bit [3:0] : LINK_TRAN Minor Revision
+ * bit [5:4] : LINK/TRAN Major Revision
+ * bit [7:6] : Reserved
+ * bit [15:8] : Device-Specific N_FCU
+ * bit [18:16]: Device Type
+ * 001b=Host
+ * 010b=Device
+ * 011b=Reserved for CMD issuable Device
+ * bit [19] : Reserved
+ * bit [31:20]: Device-Specific MAX_BLKLEN
+ * bit [39:32]: Device-Specific N_DATA_GAP
+ * bit [63:40]: Reserved
+ */
+ cap = cmd.resp[0];
+ card->uhs2_config.link_minor_rev =
+ cap & UHS2_DEV_CONFIG_LT_MINOR_MASK;
+ card->uhs2_config.link_major_rev =
+ (cap >> UHS2_DEV_CONFIG_LT_MAJOR_POS) &
+ UHS2_DEV_CONFIG_LT_MAJOR_MASK;
+ card->uhs2_config.n_fcu =
+ (cap >> UHS2_DEV_CONFIG_N_FCU_POS) &
+ UHS2_DEV_CONFIG_N_FCU_MASK;
+ card->uhs2_config.dev_type =
+ (cap >> UHS2_DEV_CONFIG_DEV_TYPE_POS) &
+ UHS2_DEV_CONFIG_DEV_TYPE_MASK;
+ card->uhs2_config.maxblk_len =
+ (cap >> UHS2_DEV_CONFIG_MAX_BLK_LEN_POS) &
+ UHS2_DEV_CONFIG_MAX_BLK_LEN_MASK;
+
+ cap = cmd.resp[1];
+ card->uhs2_config.n_data_gap =
+ cap & UHS2_DEV_CONFIG_N_DATA_GAP_MASK;
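+ /* An N_FCU register value of 0 encodes the maximum of 256. */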
+ if (card->uhs2_config.n_fcu == 0)
+ card->uhs2_config.n_fcu = 256;
+
+ return 0;
+}
+
+/*
+ * Based on the card's and host's UHS-II capabilities, let's update the
+ * configuration of the card and the host. This may also include to move to a
+ * greater speed range/mode. Depending on the updated configuration, we may need
+ * to do a soft reset of the card via sending it a GO_DORMANT_STATE command.
+ *
+ * In the final step, let's check if the card signals "config completion", which
+ * indicates that the card has moved from config state into active state.
+ */
+static int sd_uhs2_config_write(struct mmc_host *host, struct mmc_card *card)
+{
+ struct mmc_command cmd = {0};
+ struct uhs2_command uhs2_cmd = {};
+ u32 payload0, payload1;
+ u8 nMinDataGap;
+ int err;
+
+ /*
+ * Use Control Write CCMD to set Generic Setting in Configuration Register.
+ * - Control Write(R/W=1) with 8-Byte payload(PLEN=10b).
+ * - IOADR = Generic Setting Register(CFG_BASE + 008h)
+ * - Payload = New contents to be written to Generic Setting Register
+ */
+ uhs2_cmd.header = UHS2_NATIVE_PACKET | UHS2_PACKET_TYPE_CCMD | card->uhs2_config.node_id;
+ uhs2_cmd.arg = ((UHS2_DEV_CONFIG_GEN_SET & 0xFF) << 8) |
+ UHS2_NATIVE_CMD_WRITE |
+ UHS2_NATIVE_CMD_PLEN_8B |
+ (UHS2_DEV_CONFIG_GEN_SET >> 8);
+
+ /*
+ * Most UHS-II cards only support FD and 2L-HD mode. Other lane numbers
+ * defined in UHS-II addendum Ver1.01 are optional.
+ */
+ host->uhs2_caps.n_lanes_set = UHS2_DEV_CONFIG_GEN_SET_2L_FD_HD;
+ card->uhs2_config.n_lanes_set = UHS2_DEV_CONFIG_GEN_SET_2L_FD_HD;
+
+ payload0 = card->uhs2_config.n_lanes_set << UHS2_DEV_CONFIG_N_LANES_POS;
+ payload1 = 0;
+ uhs2_cmd.payload[0] = cpu_to_be32(payload0);
+ uhs2_cmd.payload[1] = cpu_to_be32(payload1);
+
+ /*
+ * This is a write CCMD: the new contents of the Generic Setting
+ * Register are carried in the 8-byte payload assembled above.
+ */
+ sd_uhs2_cmd_assemble(&cmd, &uhs2_cmd, UHS2_CFG_WRITE_PAYLOAD_LEN, 0);
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err) {
+ pr_err("%s: %s: UHS2 CMD send fail, err= 0x%x!\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ /*
+ * Use Control Write CCMD to set PHY Setting in Configuration Register.
+ * - Control Write(R/W=1) with 8-Byte payload(PLEN=10b).
+ * - IOADR = PHY Setting Register(CFG_BASE + 00Ah)
+ * - Payload = New contents to be written to PHY Setting Register
+ */
+ uhs2_cmd.arg = ((UHS2_DEV_CONFIG_PHY_SET & 0xFF) << 8) |
+ UHS2_NATIVE_CMD_WRITE |
+ UHS2_NATIVE_CMD_PLEN_8B |
+ (UHS2_DEV_CONFIG_PHY_SET >> 8);
+
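+ /*
+ * Pick the timing mode based on the negotiated speed range and lane
+ * configuration. The minimum N_DATA_GAP depends on whether 2L-HD mode
+ * is used (see the LINK/TRAN settings below).
+ */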
+ if (host->uhs2_caps.speed_range == UHS2_DEV_CONFIG_PHY_SET_SPEED_B) {
+ if (card->uhs2_config.n_lanes == UHS2_DEV_CONFIG_2L_HD_FD &&
+ host->uhs2_caps.n_lanes == UHS2_DEV_CONFIG_2L_HD_FD) {
+ /* Support HD */
+ host->ios.timing = MMC_TIMING_UHS2_SPEED_B_HD;
+ nMinDataGap = 1;
+ } else {
+ /* Only support 2L-FD so far */
+ host->ios.timing = MMC_TIMING_UHS2_SPEED_B;
+ nMinDataGap = 3;
+ }
+ card->uhs2_config.speed_range_set = UHS2_DEV_CONFIG_PHY_SET_SPEED_B;
+ } else {
+ if (card->uhs2_config.n_lanes == UHS2_DEV_CONFIG_2L_HD_FD &&
+ host->uhs2_caps.n_lanes == UHS2_DEV_CONFIG_2L_HD_FD) {
+ /* Support HD */
+ host->ios.timing = MMC_TIMING_UHS2_SPEED_A_HD;
+ nMinDataGap = 1;
+ } else {
+ /* Only support 2L-FD so far */
+ host->ios.timing = MMC_TIMING_UHS2_SPEED_A;
+ nMinDataGap = 3;
+ }
+ card->uhs2_config.speed_range_set = UHS2_DEV_CONFIG_PHY_SET_SPEED_A;
+ }
+
+ payload0 = card->uhs2_config.speed_range_set << UHS2_DEV_CONFIG_PHY_SET_SPEED_POS;
+
+ card->uhs2_config.n_lss_sync_set = (max(card->uhs2_config.n_lss_sync,
+ host->uhs2_caps.n_lss_sync) >> 2) &
+ UHS2_DEV_CONFIG_N_LSS_SYN_MASK;
+ host->uhs2_caps.n_lss_sync_set = card->uhs2_config.n_lss_sync_set;
+
+ card->uhs2_config.n_lss_dir_set = (max(card->uhs2_config.n_lss_dir,
+ host->uhs2_caps.n_lss_dir) >> 3) &
+ UHS2_DEV_CONFIG_N_LSS_DIR_MASK;
+ host->uhs2_caps.n_lss_dir_set = card->uhs2_config.n_lss_dir_set;
+
+ payload1 = (card->uhs2_config.n_lss_dir_set << UHS2_DEV_CONFIG_N_LSS_DIR_POS) |
+ card->uhs2_config.n_lss_sync_set;
+ uhs2_cmd.payload[0] = cpu_to_be32(payload0);
+ uhs2_cmd.payload[1] = cpu_to_be32(payload1);
+
+ memset(uhs2_cmd.uhs2_resp, 0, sizeof(uhs2_cmd.uhs2_resp));
+
+ sd_uhs2_cmd_assemble(&cmd, &uhs2_cmd, UHS2_CFG_WRITE_PAYLOAD_LEN,
+ UHS2_CFG_WRITE_PHY_SET_RESP_LEN);
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err) {
+ pr_err("%s: %s: UHS2 CMD send fail, err= 0x%x!\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ if ((uhs2_cmd.uhs2_resp[2] & 0x80)) {
+ pr_err("%s: %s: UHS2 CMD not accepted, resp= 0x%x!\n",
+ mmc_hostname(host), __func__, uhs2_cmd.uhs2_resp[2]);
+ return -EIO;
+ }
+
+ /*
+ * Use Control Write CCMD to set LINK/TRAN Setting in Configuration Register.
+ * - Control Write(R/W=1) with 8-Byte payload(PLEN=10b).
+ * - IOADR = LINK/TRAN Setting Register(CFG_BASE + 00Ch)
+ * - Payload = New contents to be written to LINK/TRAN Setting Register
+ */
+ uhs2_cmd.arg = ((UHS2_DEV_CONFIG_LINK_TRAN_SET & 0xFF) << 8) |
+ UHS2_NATIVE_CMD_WRITE |
+ UHS2_NATIVE_CMD_PLEN_8B |
+ (UHS2_DEV_CONFIG_LINK_TRAN_SET >> 8);
+
+ if (card->uhs2_config.app_type == UHS2_DEV_CONFIG_APP_SD_MEM)
+ card->uhs2_config.maxblk_len_set = UHS2_DEV_CONFIG_LT_SET_MAX_BLK_LEN;
+ else
+ card->uhs2_config.maxblk_len_set = min(card->uhs2_config.maxblk_len,
+ host->uhs2_caps.maxblk_len);
+ host->uhs2_caps.maxblk_len_set = card->uhs2_config.maxblk_len_set;
+
+ card->uhs2_config.n_fcu_set = min(card->uhs2_config.n_fcu, host->uhs2_caps.n_fcu);
+ host->uhs2_caps.n_fcu_set = card->uhs2_config.n_fcu_set;
+
+ card->uhs2_config.n_data_gap_set = max(nMinDataGap, card->uhs2_config.n_data_gap);
+ host->uhs2_caps.n_data_gap_set = card->uhs2_config.n_data_gap_set;
+
+ host->uhs2_caps.max_retry_set = 3;
+ card->uhs2_config.max_retry_set = host->uhs2_caps.max_retry_set;
+
+ payload0 = (card->uhs2_config.maxblk_len_set << UHS2_DEV_CONFIG_MAX_BLK_LEN_POS) |
+ (card->uhs2_config.max_retry_set << UHS2_DEV_CONFIG_LT_SET_MAX_RETRY_POS) |
+ (card->uhs2_config.n_fcu_set << UHS2_DEV_CONFIG_N_FCU_POS);
+ payload1 = card->uhs2_config.n_data_gap_set;
+ uhs2_cmd.payload[0] = cpu_to_be32(payload0);
+ uhs2_cmd.payload[1] = cpu_to_be32(payload1);
+
+ sd_uhs2_cmd_assemble(&cmd, &uhs2_cmd, UHS2_CFG_WRITE_PAYLOAD_LEN, 0);
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err) {
+ pr_err("%s: %s: UHS2 CMD send fail, err= 0x%x!\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ /*
+ * Use Control Write CCMD to set Config Completion(payload bit 63) in Generic Setting
+ * Register.
+ * Header:
+ * - Control Write(R/W=1) with 8-Byte payload(PLEN=10b).
+ * - IOADR = Generic Setting Register(CFG_BASE + 008h)
+ * Payload:
+ * - bit [63]: Config Completion
+ *
+ * DLSM transits to Active state immediately when Config Completion is set to 1.
+ */
+ uhs2_cmd.arg = ((UHS2_DEV_CONFIG_GEN_SET & 0xFF) << 8) |
+ UHS2_NATIVE_CMD_WRITE |
+ UHS2_NATIVE_CMD_PLEN_8B |
+ (UHS2_DEV_CONFIG_GEN_SET >> 8);
+
+ payload0 = 0;
+ payload1 = UHS2_DEV_CONFIG_GEN_SET_CFG_COMPLETE;
+ uhs2_cmd.payload[0] = cpu_to_be32(payload0);
+ uhs2_cmd.payload[1] = cpu_to_be32(payload1);
+
+ memset(uhs2_cmd.uhs2_resp, 0, sizeof(uhs2_cmd.uhs2_resp));
+ sd_uhs2_cmd_assemble(&cmd, &uhs2_cmd, UHS2_CFG_WRITE_PAYLOAD_LEN,
+ UHS2_CFG_WRITE_GENERIC_SET_RESP_LEN);
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err) {
+ pr_err("%s: %s: UHS2 CMD send fail, err= 0x%x!\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ /* Set host Config Setting registers */
+ err = host->ops->uhs2_control(host, UHS2_SET_CONFIG);
+ if (err) {
+ pr_err("%s: %s: UHS2 SET_CONFIG fail!\n", mmc_hostname(host), __func__);
+ return err;
+ }
+
+ return 0;
+}
+
+static int sd_uhs2_go_dormant(struct mmc_host *host, u32 node_id)
+{
+ struct mmc_command cmd = {0};
+ struct uhs2_command uhs2_cmd = {};
+ int err;
+
+ /* Disable Normal INT */
+ err = host->ops->uhs2_control(host, UHS2_DISABLE_INT);
+ if (err) {
+ pr_err("%s: %s: UHS2 DISABLE_INT fail!\n",
+ mmc_hostname(host), __func__);
+ return err;
+ }
+
+ /*
+ * Refer to UHS-II Addendum Version 1.02 Figure 6-17 to see GO_DORMANT_STATE CCMD format.
+ * Header:
+ * - Control Write(R/W=1) with 4-Byte payload(PLEN=01b).
+ * - IOADR = CMD_BASE + 001h
+ * Payload:
+ * - bit [7]: HBR(Entry to Hibernate Mode)
+ * 1: Host intends to enter Hibernate mode during Dormant state.
+ * The default setting is 0 because hibernate is currently not supported.
+ */
+ uhs2_cmd.header = UHS2_NATIVE_PACKET | UHS2_PACKET_TYPE_CCMD | node_id;
+ uhs2_cmd.arg = ((UHS2_DEV_CMD_GO_DORMANT_STATE & 0xFF) << 8) |
+ UHS2_NATIVE_CMD_WRITE |
+ UHS2_NATIVE_CMD_PLEN_4B |
+ (UHS2_DEV_CMD_GO_DORMANT_STATE >> 8);
+
+ sd_uhs2_cmd_assemble(&cmd, &uhs2_cmd, UHS2_GO_DORMANT_PAYLOAD_LEN, 0);
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err) {
+ pr_err("%s: %s: UHS2 CMD send fail, err= 0x%x!\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ /* Check Dormant State in Present */
+ err = host->ops->uhs2_control(host, UHS2_CHECK_DORMANT);
+ if (err)
+ return err;
+
+ /* Disable UHS2 card clock */
+ err = host->ops->uhs2_control(host, UHS2_DISABLE_CLK);
+ if (err)
+ return err;
+
+ /* Restore sd clock */
+ mmc_delay(5);
+ err = host->ops->uhs2_control(host, UHS2_ENABLE_CLK);
+ if (err)
+ return err;
+
+ /* Enable Normal INT */
+ err = host->ops->uhs2_control(host, UHS2_ENABLE_INT);
+ if (err)
+ return err;
+
+ /* Detect UHS2 */
+ err = host->ops->uhs2_control(host, UHS2_PHY_INIT);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int sd_uhs2_wait_active_state_cb(void *cb_data, bool *busy)
+{
+ struct sd_uhs2_wait_active_state_data *data = cb_data;
+ struct mmc_host *host = data->host;
+ struct mmc_command *cmd = data->cmd;
+ int err;
+
+ err = mmc_wait_for_cmd(host, cmd, 0);
+ if (err)
+ return err;
+
+ if (cmd->resp[1] & UHS2_DEV_CONFIG_GEN_SET_CFG_COMPLETE)
+ *busy = false;
+ else
+ *busy = true;
+
+ return 0;
+}
+
+static int sd_uhs2_go_dormant_state(struct mmc_host *host, u32 node_id)
+{
+ struct mmc_command cmd = {0};
+ struct uhs2_command uhs2_cmd = {};
+ int err;
+ struct sd_uhs2_wait_active_state_data cb_data = {
+ .host = host,
+ .cmd = &cmd
+ };
+
+ err = sd_uhs2_go_dormant(host, node_id);
+ if (err) {
+ pr_err("%s: %s: UHS2 GO_DORMANT_STATE fail, err= 0x%x!\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ /*
+ * Use Control Read CCMD to check Config Completion(bit 63) in Generic Setting Register.
+ * - Control Read(R/W=0) with 8-Byte payload(PLEN=10b).
+ * - IOADR = Generic Setting Register(CFG_BASE + 008h)
+ *
+ * When the UHS-II card has been switched to the new speed mode, it sets Config Completion to 1.
+ */
+ uhs2_cmd.header = UHS2_NATIVE_PACKET | UHS2_PACKET_TYPE_CCMD | node_id;
+ uhs2_cmd.arg = ((UHS2_DEV_CONFIG_GEN_SET & 0xFF) << 8) |
+ UHS2_NATIVE_CMD_READ |
+ UHS2_NATIVE_CMD_PLEN_8B |
+ (UHS2_DEV_CONFIG_GEN_SET >> 8);
+
+ sd_uhs2_cmd_assemble(&cmd, &uhs2_cmd, 0, 0);
+ err = __mmc_poll_for_busy(host, UHS2_WAIT_CFG_COMPLETE_PERIOD_US,
+ UHS2_WAIT_CFG_COMPLETE_TIMEOUT_MS,
+ &sd_uhs2_wait_active_state_cb, &cb_data);
+ if (err) {
+ pr_err("%s: %s: Not switch to Active in 100 ms\n", mmc_hostname(host), __func__);
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * Allocate the data structure for the mmc_card and run the UHS-II specific
+ * initialization sequence.
+ */
+static int sd_uhs2_init_card(struct mmc_host *host, struct mmc_card *oldcard)
+{
+ struct mmc_card *card;
+ u32 node_id = 0;
+ int err;
+
+ err = sd_uhs2_dev_init(host);
+ if (err)
+ return err;
+
+ err = sd_uhs2_enum(host, &node_id);
+ if (err)
+ return err;
+
+ if (oldcard) {
+ card = oldcard;
+ } else {
+ card = mmc_alloc_card(host, &sd_type);
+ if (IS_ERR(card))
+ return PTR_ERR(card);
+ }
+
+ card->uhs2_config.node_id = node_id;
+ card->type = MMC_TYPE_SD;
+
+ err = sd_uhs2_config_read(host, card);
+ if (err)
+ goto err;
+
+ err = sd_uhs2_config_write(host, card);
+ if (err)
+ goto err;
+
+ /* If the speed was changed to Range B, a GO_DORMANT_STATE cycle is needed. */
+ if (host->ios.timing == MMC_TIMING_UHS2_SPEED_B ||
+ host->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD) {
+ err = sd_uhs2_go_dormant_state(host, node_id);
+ if (err)
+ goto err;
+ }
+
+ host->uhs2_sd_tran = true;
+ host->card = card;
+ return 0;
+
+err:
+ if (!oldcard)
+ mmc_remove_card(card);
+ return err;
+}
+
+/*
+ * Initialize the UHS-II card through the SD-TRAN transport layer. This enables
+ * commands/requests to be backwards compatible through the legacy SD protocol.
+ * UHS-II cards have a specific power limit specified for VDD1/VDD2, which
+ * should be set through a legacy CMD6. Note that the power limit, once set,
+ * survives a soft reset through the GO_DORMANT_STATE command.
+ */
+static int sd_uhs2_legacy_init(struct mmc_host *host, struct mmc_card *card,
+ bool reinit)
+{
+ int err;
+ u32 cid[4];
+ u32 ocr;
+ u32 rocr;
+ u8 *status;
+ int ro;
+
+ /* Send CMD0 to reset SD card */
+ err = __mmc_go_idle(host);
+ if (err)
+ return err;
+
+ mmc_delay(1);
+
+ /* Send CMD8 to communicate SD interface operation condition */
+ err = mmc_send_if_cond(host, host->ocr_avail);
+ if (err)
+ return err;
+
+ /*
+ * Probe SD card working voltage.
+ */
+ err = mmc_send_app_op_cond(host, 0, &ocr);
+ if (err)
+ return err;
+
+ card->ocr = ocr;
+
+ /*
+ * Some SD cards claim an out of spec VDD voltage range. Let's treat
+ * these bits as being invalid and especially also bit7.
+ */
+ ocr &= ~0x7FFF;
+ rocr = mmc_select_voltage(host, ocr);
+ /*
+ * Some cards report a zero value of rocr in UHS-II mode. In that case,
+ * fall back to the host's ocr value.
+ */
+ if (!rocr)
+ rocr = host->ocr_avail;
+
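+ /* Indicate support for high-capacity cards (CCS) and request maximum performance (XPC). */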
+ rocr |= (SD_OCR_CCS | SD_OCR_XPC);
+
+ /* Wait for the SD card to report power-up ready. */
+ ocr = rocr;
+
+ err = mmc_send_app_op_cond(host, ocr, &rocr);
+ if (err)
+ return err;
+
+ err = mmc_send_cid(host, cid);
+ if (err)
+ return err;
+
+ if (reinit) {
+ if (memcmp(cid, card->raw_cid, sizeof(cid)) != 0) {
+ pr_debug("%s: Perhaps the card was replaced\n",
+ mmc_hostname(host));
+ return -ENOENT;
+ }
+ } else {
+ memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
+ mmc_decode_cid(card);
+ }
+
+ /*
+ * For native busses: get card RCA and quit open drain mode.
+ */
+ err = mmc_send_relative_addr(host, &card->rca);
+ if (err)
+ return err;
+
+ err = mmc_sd_get_csd(card, false);
+ if (err)
+ return err;
+
+ /*
+ * Select card, as all following commands rely on that.
+ */
+ err = mmc_select_card(card);
+ if (err)
+ return err;
+
+ /*
+ * Fetch SCR from card.
+ */
+ err = mmc_app_send_scr(card);
+ if (err)
+ return err;
+
+ err = mmc_decode_scr(card);
+ if (err)
+ return err;
+
+ /*
+ * Switch to high power consumption mode.
+ * Even if the switch fails, the SD card can still work in the lower power
+ * consumption mode, but performance will be lower than in the high power
+ * consumption mode.
+ */
+ status = kmalloc(64, GFP_KERNEL);
+ if (!status)
+ return -ENOMEM;
+
+ if (!(card->csd.cmdclass & CCC_SWITCH)) {
+ pr_warn("%s: card lacks mandatory switch function, performance might suffer\n",
+ mmc_hostname(card->host));
+ } else {
+ /*
+ * Send CMD6 to set Maximum Power Consumption to get better
+ * performance. Ignore errors and continue.
+ */
+ err = mmc_sd_switch(card, 0, 3, SD4_SET_POWER_LIMIT_1_80W, status);
+ if (!err)
+ mmc_sd_switch(card, 1, 3, SD4_SET_POWER_LIMIT_1_80W, status);
+ }
+
+ /*
+ * Check if read-only switch is active.
+ */
+ ro = mmc_sd_get_ro(host);
+ if (ro < 0)
+ pr_warn("%s: host does not support read-only switch, assuming write-enable\n",
+ mmc_hostname(host));
+ else if (ro > 0)
+ mmc_card_set_readonly(card);
+
+ kfree(status);
+ return 0;
+}
+
+static int sd_uhs2_reinit(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+ int err;
+
+ err = sd_uhs2_power_up(host);
+ if (err)
+ return err;
+
+ err = sd_uhs2_phy_init(host);
+ if (err)
+ return err;
+
+ err = sd_uhs2_init_card(host, card);
+ if (err)
+ return err;
+
+ return sd_uhs2_legacy_init(host, card, true);
+}
+
+static void sd_uhs2_remove(struct mmc_host *host)
+{
+ mmc_remove_card(host->card);
+ host->card = NULL;
+}
+
+static int sd_uhs2_alive(struct mmc_host *host)
+{
+ return mmc_send_status(host->card, NULL);
+}
+
+static void sd_uhs2_detect(struct mmc_host *host)
+{
+ int err;
+
+ mmc_get_card(host->card, NULL);
+ err = _mmc_detect_card_removed(host);
+ mmc_put_card(host->card, NULL);
+
+ if (err) {
+ sd_uhs2_remove(host);
+
+ mmc_claim_host(host);
+ mmc_detach_bus(host);
+ sd_uhs2_power_off(host);
+ mmc_release_host(host);
+ }
+}
+
+static int _sd_uhs2_suspend(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+
+ mmc_claim_host(host);
+
+ if (mmc_card_suspended(card))
+ goto out;
+
+ sd_uhs2_power_off(host);
+ mmc_card_set_suspended(card);
+
+out:
+ mmc_release_host(host);
+ return 0;
+}
+
+/*
+ * Callback for suspend
+ */
+static int sd_uhs2_suspend(struct mmc_host *host)
+{
+ int err;
+
+ err = _sd_uhs2_suspend(host);
+ if (!err) {
+ pm_runtime_disable(&host->card->dev);
+ pm_runtime_set_suspended(&host->card->dev);
+ }
+
+ return err;
+}
+
+/*
+ * This function tries to determine if the same card is still present
+ * and, if so, restore all state to it.
+ */
+static int _mmc_sd_uhs2_resume(struct mmc_host *host)
+{
+ int err = 0;
+
+ mmc_claim_host(host);
+
+ if (!mmc_card_suspended(host->card))
+ goto out;
+
+ /* Power up UHS2 SD card and re-initialize it. */
+ err = sd_uhs2_reinit(host);
+ mmc_card_clr_suspended(host->card);
+
+out:
+ mmc_release_host(host);
+ return err;
+}
+
+/*
+ * Callback for resume
+ */
+static int sd_uhs2_resume(struct mmc_host *host)
+{
+ pm_runtime_enable(&host->card->dev);
+ return 0;
+}
+
+/*
+ * Callback for runtime_suspend.
+ */
+static int sd_uhs2_runtime_suspend(struct mmc_host *host)
+{
+ int err;
+
+ if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
+ return 0;
+
+ err = _sd_uhs2_suspend(host);
+ if (err)
+ pr_err("%s: error %d doing aggressive suspend\n", mmc_hostname(host), err);
+
+ return err;
+}
+
+static int sd_uhs2_runtime_resume(struct mmc_host *host)
+{
+ int err;
+
+ err = _mmc_sd_uhs2_resume(host);
+ if (err && err != -ENOMEDIUM)
+ pr_err("%s: error %d doing runtime resume\n", mmc_hostname(host), err);
+
+ return err;
+}
+
+static int sd_uhs2_hw_reset(struct mmc_host *host)
+{
+ sd_uhs2_power_off(host);
+ /* Wait at least 1 ms according to SD spec */
+ mmc_delay(1);
+
+ return sd_uhs2_reinit(host);
+}
+
+static const struct mmc_bus_ops sd_uhs2_ops = {
+ .remove = sd_uhs2_remove,
+ .alive = sd_uhs2_alive,
+ .detect = sd_uhs2_detect,
+ .suspend = sd_uhs2_suspend,
+ .resume = sd_uhs2_resume,
+ .runtime_suspend = sd_uhs2_runtime_suspend,
+ .runtime_resume = sd_uhs2_runtime_resume,
+ .shutdown = sd_uhs2_suspend,
+ .hw_reset = sd_uhs2_hw_reset,
+};
+
+static int sd_uhs2_attach(struct mmc_host *host)
+{
+ int err;
+
+ err = sd_uhs2_power_up(host);
+ if (err)
+ goto err;
+
+ err = sd_uhs2_phy_init(host);
+ if (err)
+ goto err;
+
+ err = sd_uhs2_init_card(host, NULL);
+ if (err)
+ goto err;
+
+ err = sd_uhs2_legacy_init(host, host->card, false);
+ if (err)
+ goto remove_card;
+
+ mmc_attach_bus(host, &sd_uhs2_ops);
+
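+ /* Drop the host claim while the card is registered with the driver core. */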
+ mmc_release_host(host);
+
+ err = mmc_add_card(host->card);
+ if (err)
+ goto remove_card;
+
+ mmc_claim_host(host);
+ return 0;
+
+remove_card:
+ sd_uhs2_remove(host);
+ mmc_claim_host(host);
+err:
+ mmc_detach_bus(host);
+ sd_uhs2_power_off(host);
+ return err;
+}
+
+/**
+ * mmc_attach_sd_uhs2 - select UHS2 interface
+ * @host: MMC host
+ *
+ * Try to select the UHS-II interface and initialize the bus, starting at
+ * the highest supported frequency and retrying at a lower one if needed.
+ *
+ * Return: 0 on success, non-zero error on failure
+ */
+int mmc_attach_sd_uhs2(struct mmc_host *host)
+{
+ int i, err;
+
+ if (!(host->caps2 & MMC_CAP2_SD_UHS2))
+ return -EOPNOTSUPP;
+
+ /* Turn off the legacy SD interface before trying with UHS-II. */
+ mmc_power_off(host);
+
+ /*
+ * Start UHS-II initialization at 52MHz and possibly make a retry at
+ * 26MHz according to the spec. It's required that the host driver
+ * validates ios->clock, to set a rate within the correct range.
+ */
+ for (i = 0; i < ARRAY_SIZE(sd_uhs2_freqs); i++) {
+ host->f_init = sd_uhs2_freqs[i];
+ pr_debug("%s: %s: trying to init UHS-II card at %u Hz\n",
+ mmc_hostname(host), __func__, host->f_init);
+ err = sd_uhs2_attach(host);
+ if (!err)
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * mmc_uhs2_prepare_cmd - prepare a UHS-II command packet for an SD command
+ * @host: MMC host
+ * @mrq: MMC request
+ *
+ * Initialize and fill in the header and payload of the UHS-II command packet
+ * that is embedded in @mrq and attach it to the SD command.
+ */
+void mmc_uhs2_prepare_cmd(struct mmc_host *host, struct mmc_request *mrq)
+{
+ struct mmc_command *cmd;
+ struct uhs2_command *uhs2_cmd;
+ u8 plen;
+
+ cmd = mrq->cmd;
+ cmd->uhs2_cmd = &mrq->uhs2_cmd;
+ uhs2_cmd = cmd->uhs2_cmd;
+ uhs2_cmd->header = host->card->uhs2_config.node_id;
+ if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC)
+ uhs2_cmd->header |= UHS2_PACKET_TYPE_DCMD;
+ else
+ uhs2_cmd->header |= UHS2_PACKET_TYPE_CCMD;
+
+ uhs2_cmd->arg = cmd->opcode << UHS2_SD_CMD_INDEX_POS;
+ if (host->uhs2_app_cmd) {
+ uhs2_cmd->arg |= UHS2_SD_CMD_APP;
+ host->uhs2_app_cmd = false;
+ }
+
+ /*
+ * UHS-II Addendum 7.2.1.2
+ * Host may set DM to 1 for DCMD which supports multi-block read/write regardless of
+ * data transfer length (e.g., CMD18, CMD25). Otherwise, it shall not set DM to 1.
+ * (e.g., CMD6, CMD17, CMD24). These rules are also applied to other multi-block read/write
+ * commands defined in other Part of SD specifications (for example, Host may set DM to 1
+ * for ACMD18 or ACMD25).
+ */
+ if (mmc_op_multi(cmd->opcode))
+ cmd->uhs2_cmd->tmode_half_duplex = mmc_card_uhs2_hd_mode(host);
+ else
+ cmd->uhs2_cmd->tmode_half_duplex = 0;
+
+ uhs2_cmd = cmd->uhs2_cmd;
+ plen = 2; /* at the maximum */
+
+ if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC &&
+ cmd->uhs2_cmd->tmode_half_duplex) {
+ if (mmc_card_uhs2_hd_mode(host))
+ uhs2_cmd->arg |= UHS2_DCMD_2L_HD_MODE;
+
+ uhs2_cmd->arg |= UHS2_DCMD_LM_TLEN_EXIST;
+
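+ /*
+ * A single block with a non-standard size (and a command other than a
+ * plain single-block read/write) expresses TLEN in bytes; otherwise
+ * TLEN is expressed as a block count.
+ */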
+ if (cmd->data->blocks == 1 &&
+ cmd->data->blksz != 512 &&
+ cmd->opcode != MMC_READ_SINGLE_BLOCK &&
+ cmd->opcode != MMC_WRITE_BLOCK) {
+ uhs2_cmd->arg |= UHS2_DCMD_TLUM_BYTE_MODE;
+ uhs2_cmd->payload[1] = cpu_to_be32(cmd->data->blksz);
+ } else {
+ uhs2_cmd->payload[1] = cpu_to_be32(cmd->data->blocks);
+ }
+ } else {
+ plen = 1;
+ }
+
+ uhs2_cmd->payload[0] = cpu_to_be32(cmd->arg);
+ sd_uhs2_cmd_assemble(cmd, uhs2_cmd, plen, 0);
+}
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 68edf7a615be..83085e76486a 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -40,9 +41,9 @@ static ssize_t info##num##_show(struct device *dev, struct device_attribute *att
\
if (num > card->num_info) \
return -ENODATA; \
- if (!card->info[num-1][0]) \
+ if (!card->info[num - 1][0]) \
return 0; \
- return sprintf(buf, "%s\n", card->info[num-1]); \
+ return sysfs_emit(buf, "%s\n", card->info[num - 1]); \
} \
static DEVICE_ATTR_RO(info##num)
@@ -65,7 +66,7 @@ static struct attribute *sdio_std_attrs[] = {
};
ATTRIBUTE_GROUPS(sdio_std);
-static struct device_type sdio_type = {
+static const struct device_type sdio_type = {
.groups = sdio_std_groups,
};
@@ -197,7 +198,7 @@ static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
if (ret)
goto out;
- if (mmc_host_uhs(card->host)) {
+ if (mmc_host_can_uhs(card->host)) {
if (data & SDIO_UHS_DDR50)
card->sw_caps.sd3_bus_mode
|= SD_MODE_UHS_DDR50 | SD_MODE_UHS_SDR50
@@ -225,6 +226,20 @@ static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
if (data & SDIO_DRIVE_SDTD)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTERRUPT_EXT, 0, &data);
+ if (ret)
+ goto out;
+
+ if (data & SDIO_INTERRUPT_EXT_SAI) {
+ data |= SDIO_INTERRUPT_EXT_EAI;
+ ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_INTERRUPT_EXT,
+ data, NULL);
+ if (ret)
+ goto out;
+
+ card->cccr.enable_async_irq = 1;
+ }
}
/* if no uhs mode ensure we check for high speed */
@@ -334,7 +349,7 @@ static int sdio_disable_4bit_bus(struct mmc_card *card)
{
int err;
- if (card->type == MMC_TYPE_SDIO)
+ if (mmc_card_sdio(card))
goto out;
if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
@@ -359,7 +374,7 @@ static int sdio_enable_4bit_bus(struct mmc_card *card)
err = sdio_enable_wide(card);
if (err <= 0)
return err;
- if (card->type == MMC_TYPE_SDIO)
+ if (mmc_card_sdio(card))
goto out;
if (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4) {
@@ -414,7 +429,7 @@ static int sdio_enable_hs(struct mmc_card *card)
int ret;
ret = mmc_sdio_switch_hs(card, true);
- if (ret <= 0 || card->type == MMC_TYPE_SDIO)
+ if (ret <= 0 || mmc_card_sdio(card))
return ret;
ret = mmc_sd_switch_hs(card);
@@ -440,9 +455,11 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
max_dtr = card->cis.max_dtr;
}
- if (card->type == MMC_TYPE_SD_COMBO)
+ if (mmc_card_sd_combo(card))
max_dtr = min(max_dtr, mmc_sd_get_max_clock(card));
+ max_dtr = min_not_zero(max_dtr, card->quirk_max_rate);
+
return max_dtr;
}
@@ -510,7 +527,7 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card)
* If the host doesn't support any of the UHS-I modes, fallback on
* default speed.
*/
- if (!mmc_host_uhs(card->host))
+ if (!mmc_host_can_uhs(card->host))
return 0;
bus_speed = SDIO_SPEED_SDR12;
@@ -652,7 +669,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
WARN_ON(!host->claimed);
/* to query card if 1.8V signalling is supported */
- if (mmc_host_uhs(host))
+ if (mmc_host_can_uhs(host))
ocr |= R4_18V_PRESENT;
try_again:
@@ -688,7 +705,7 @@ try_again:
mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) {
card->type = MMC_TYPE_SD_COMBO;
- if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
+ if (oldcard && (!mmc_card_sd_combo(oldcard) ||
memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) {
err = -ENOENT;
goto mismatch;
@@ -696,7 +713,7 @@ try_again:
} else {
card->type = MMC_TYPE_SDIO;
- if (oldcard && oldcard->type != MMC_TYPE_SDIO) {
+ if (oldcard && !mmc_card_sdio(oldcard)) {
err = -ENOENT;
goto mismatch;
}
@@ -707,6 +724,9 @@ try_again:
*/
if (host->ops->init_card)
host->ops->init_card(host, card);
+ mmc_fixup_device(card, sdio_card_init_methods);
+
+ card->ocr = ocr_card;
/*
* If the host and card support UHS-I mode request the card
@@ -750,8 +770,8 @@ try_again:
/*
* Read CSD, before selecting the card
*/
- if (!oldcard && card->type == MMC_TYPE_SD_COMBO) {
- err = mmc_sd_get_csd(card);
+ if (!oldcard && mmc_card_sd_combo(card)) {
+ err = mmc_sd_get_csd(card, false);
if (err)
goto remove;
@@ -820,10 +840,10 @@ try_again:
goto mismatch;
}
}
- card->ocr = ocr_card;
+
mmc_fixup_device(card, sdio_fixup_methods);
- if (card->type == MMC_TYPE_SD_COMBO) {
+ if (mmc_card_sd_combo(card)) {
err = mmc_sd_setup_card(host, card, oldcard != NULL);
/* handle as SDIO-only card if memory init failed */
if (err) {
@@ -925,7 +945,11 @@ static void mmc_sdio_remove(struct mmc_host *host)
*/
static int mmc_sdio_alive(struct mmc_host *host)
{
- return mmc_select_card(host->card);
+ if (!mmc_host_is_spi(host))
+ return mmc_select_card(host->card);
+ else
+ return mmc_io_rw_direct(host->card, 0, 0, SDIO_CCCR_CCCR, 0,
+ NULL);
}
/*
@@ -1025,7 +1049,7 @@ static int mmc_sdio_suspend(struct mmc_host *host)
/* Prevent processing of SDIO IRQs in suspended state. */
mmc_card_set_suspended(host->card);
- cancel_delayed_work_sync(&host->sdio_irq_work);
+ cancel_work_sync(&host->sdio_irq_work);
mmc_claim_host(host);
@@ -1071,8 +1095,14 @@ static int mmc_sdio_resume(struct mmc_host *host)
}
err = mmc_sdio_reinit_card(host);
} else if (mmc_card_wake_sdio_irq(host)) {
- /* We may have switched to 1-bit mode during suspend */
+ /*
+ * We may have switched to 1-bit mode during suspend. Hold
+ * retuning, because tuning is only supported in 4-bit or
+ * 8-bit mode.
+ */
+ mmc_retune_hold_now(host);
err = sdio_enable_4bit_bus(host->card);
+ mmc_retune_release(host);
}
if (err)
@@ -1085,7 +1115,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
wake_up_process(host->sdio_irq_thread);
else if (host->caps & MMC_CAP_SDIO_IRQ)
- queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
+ schedule_work(&host->sdio_irq_work);
}
out:
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 3d709029e07c..10799772494a 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -14,6 +14,7 @@
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/acpi.h>
+#include <linux/sysfs.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -25,7 +26,7 @@
#include "sdio_cis.h"
#include "sdio_bus.h"
-#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv)
+#define to_sdio_driver(d) container_of_const(d, struct sdio_driver, drv)
/* show configuration fields */
#define sdio_config_attr(field, format_string, args...) \
@@ -35,7 +36,7 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
struct sdio_func *func; \
\
func = dev_to_sdio_func (dev); \
- return sprintf(buf, format_string, args); \
+ return sysfs_emit(buf, format_string, args); \
} \
static DEVICE_ATTR_RO(field)
@@ -52,9 +53,9 @@ static ssize_t info##num##_show(struct device *dev, struct device_attribute *att
\
if (num > func->num_info) \
return -ENODATA; \
- if (!func->info[num-1][0]) \
+ if (!func->info[num - 1][0]) \
return 0; \
- return sprintf(buf, "%s\n", func->info[num-1]); \
+ return sysfs_emit(buf, "%s\n", func->info[num - 1]); \
} \
static DEVICE_ATTR_RO(info##num)
@@ -90,7 +91,7 @@ static const struct sdio_device_id *sdio_match_one(struct sdio_func *func,
}
static const struct sdio_device_id *sdio_match_device(struct sdio_func *func,
- struct sdio_driver *sdrv)
+ const struct sdio_driver *sdrv)
{
const struct sdio_device_id *ids;
@@ -107,10 +108,10 @@ static const struct sdio_device_id *sdio_match_device(struct sdio_func *func,
return NULL;
}
-static int sdio_bus_match(struct device *dev, struct device_driver *drv)
+static int sdio_bus_match(struct device *dev, const struct device_driver *drv)
{
struct sdio_func *func = dev_to_sdio_func(dev);
- struct sdio_driver *sdrv = to_sdio_driver(drv);
+ const struct sdio_driver *sdrv = to_sdio_driver(drv);
if (sdio_match_device(func, sdrv))
return 1;
@@ -119,16 +120,16 @@ static int sdio_bus_match(struct device *dev, struct device_driver *drv)
}
static int
-sdio_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+sdio_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct sdio_func *func = dev_to_sdio_func(dev);
+ const struct sdio_func *func = dev_to_sdio_func(dev);
unsigned int i;
if (add_uevent_var(env,
"SDIO_CLASS=%02X", func->class))
return -ENOMEM;
- if (add_uevent_var(env,
+ if (add_uevent_var(env,
"SDIO_ID=%04X:%04X", func->vendor, func->device))
return -ENOMEM;
@@ -160,7 +161,7 @@ static int sdio_bus_probe(struct device *dev)
if (!id)
return -ENODEV;
- ret = dev_pm_domain_attach(dev, false);
+ ret = dev_pm_domain_attach(dev, 0);
if (ret)
return ret;
@@ -199,11 +200,10 @@ disable_runtimepm:
atomic_dec(&func->card->sdio_funcs_probed);
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_noidle(dev);
- dev_pm_domain_detach(dev, false);
return ret;
}
-static int sdio_bus_remove(struct device *dev)
+static void sdio_bus_remove(struct device *dev)
{
struct sdio_driver *drv = to_sdio_driver(dev->driver);
struct sdio_func *func = dev_to_sdio_func(dev);
@@ -230,10 +230,6 @@ static int sdio_bus_remove(struct device *dev)
/* Then undo the runtime PM settings in sdio_bus_probe() */
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_sync(dev);
-
- dev_pm_domain_detach(dev, false);
-
- return 0;
}
static const struct dev_pm_ops sdio_bus_pm_ops = {
@@ -245,7 +241,7 @@ static const struct dev_pm_ops sdio_bus_pm_ops = {
)
};
-static struct bus_type sdio_bus_type = {
+static const struct bus_type sdio_bus_type = {
.name = "sdio",
.dev_groups = sdio_dev_groups,
.match = sdio_bus_match,
@@ -266,16 +262,19 @@ void sdio_unregister_bus(void)
}
/**
- * sdio_register_driver - register a function driver
+ * __sdio_register_driver - register a function driver
* @drv: SDIO function driver
+ * @owner: owning module/driver
*/
-int sdio_register_driver(struct sdio_driver *drv)
+int __sdio_register_driver(struct sdio_driver *drv, struct module *owner)
{
drv->drv.name = drv->name;
drv->drv.bus = &sdio_bus_type;
+ drv->drv.owner = owner;
+
return driver_register(&drv->drv);
}
-EXPORT_SYMBOL_GPL(sdio_register_driver);
+EXPORT_SYMBOL_GPL(__sdio_register_driver);
/**
* sdio_unregister_driver - unregister a function driver
@@ -292,7 +291,14 @@ static void sdio_release_func(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
- sdio_free_func_cis(func);
+ if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO))
+ sdio_free_func_cis(func);
+
+ /*
+ * We have now removed the link to the tuples in the
+ * card structure, so remove the reference.
+ */
+ put_device(&func->card->dev);
kfree(func->info);
kfree(func->tmpbuf);
@@ -324,6 +330,12 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
device_initialize(&func->dev);
+ /*
+ * We may link to tuples in the card structure, so we need to
+ * make sure we hold a reference to it.
+ */
+ get_device(&func->card->dev);
+
func->dev.parent = &card->dev;
func->dev.bus = &sdio_bus_type;
func->dev.release = sdio_release_func;
@@ -377,10 +389,9 @@ int sdio_add_func(struct sdio_func *func)
*/
void sdio_remove_func(struct sdio_func *func)
{
- if (!sdio_func_present(func))
- return;
+ if (sdio_func_present(func))
+ device_del(&func->dev);
- device_del(&func->dev);
of_node_put(func->dev.of_node);
put_device(&func->dev);
}
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index b23773583179..afaa6cab1adc 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -330,13 +330,25 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
prev = &this->next;
if (ret == -ENOENT) {
+
if (time_after(jiffies, timeout))
break;
- /* warn about unknown tuples */
- pr_warn_ratelimited("%s: queuing unknown"
- " CIS tuple 0x%02x (%u bytes)\n",
- mmc_hostname(card->host),
- tpl_code, tpl_link);
+
+#define FMT(type) "%s: queuing " type " CIS tuple 0x%02x [%*ph] (%u bytes)\n"
+ /*
+ * Tuples in this range are reserved for
+ * vendors, so don't warn about them
+ */
+ if (tpl_code >= 0x80 && tpl_code <= 0x8f)
+ pr_debug_ratelimited(FMT("vendor"),
+ mmc_hostname(card->host),
+ tpl_code, tpl_link, this->data,
+ tpl_link);
+ else
+ pr_warn_ratelimited(FMT("unknown"),
+ mmc_hostname(card->host),
+ tpl_code, tpl_link, this->data,
+ tpl_link);
}
/* keep on analyzing tuples */
@@ -392,12 +404,6 @@ int sdio_read_func_cis(struct sdio_func *func)
return ret;
/*
- * Since we've linked to tuples in the card structure,
- * we must make sure we have a reference to it.
- */
- get_device(&func->card->dev);
-
- /*
* Vendor/device id is optional for function CIS, so
* copy it from the card structure as needed.
*/
@@ -422,11 +428,5 @@ void sdio_free_func_cis(struct sdio_func *func)
}
func->tuples = NULL;
-
- /*
- * We have now removed the link to the tuples in the
- * card structure, so remove the reference.
- */
- put_device(&func->card->dev);
}
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 79dbf90216b5..b774bf51981d 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -766,7 +766,7 @@ EXPORT_SYMBOL_GPL(sdio_retune_crc_disable);
* sdio_retune_crc_enable - re-enable retuning on CRC errors
* @func: SDIO function attached to host
*
- * This is the compement to sdio_retune_crc_disable().
+ * This is the complement to sdio_retune_crc_disable().
*/
void sdio_retune_crc_enable(struct sdio_func *func)
{
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 4b1f7c966ec8..2b24bdf38296 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -124,7 +124,7 @@ static void sdio_run_irqs(struct mmc_host *host)
void sdio_irq_work(struct work_struct *work)
{
struct mmc_host *host =
- container_of(work, struct mmc_host, sdio_irq_work.work);
+ container_of(work, struct mmc_host, sdio_irq_work);
sdio_run_irqs(host);
}
@@ -132,7 +132,7 @@ void sdio_irq_work(struct work_struct *work)
void sdio_signal_irq(struct mmc_host *host)
{
host->sdio_irq_pending = true;
- queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
+ schedule_work(&host->sdio_irq_work);
}
EXPORT_SYMBOL_GPL(sdio_signal_irq);
diff --git a/drivers/mmc/core/sdio_uart.c b/drivers/mmc/core/sdio_uart.c
index c36242b86b1d..7423a601e1e5 100644
--- a/drivers/mmc/core/sdio_uart.c
+++ b/drivers/mmc/core/sdio_uart.c
@@ -28,6 +28,7 @@
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
+#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/circ_buf.h>
#include <linux/tty.h>
@@ -177,11 +178,9 @@ static inline void sdio_uart_release_func(struct sdio_uart_port *port)
sdio_release_host(port->func);
}
-static inline unsigned int sdio_in(struct sdio_uart_port *port, int offset)
+static inline u8 sdio_in(struct sdio_uart_port *port, int offset)
{
- unsigned char c;
- c = sdio_readb(port->func, port->regs_offset + offset, NULL);
- return c;
+ return sdio_readb(port->func, port->regs_offset + offset, NULL);
}
static inline void sdio_out(struct sdio_uart_port *port, int offset, int value)
@@ -191,8 +190,8 @@ static inline void sdio_out(struct sdio_uart_port *port, int offset, int value)
static unsigned int sdio_uart_get_mctrl(struct sdio_uart_port *port)
{
- unsigned char status;
unsigned int ret;
+ u8 status;
/* FIXME: What stops this losing the delta bits and breaking
sdio_uart_check_modem_status ? */
@@ -245,26 +244,12 @@ static inline void sdio_uart_update_mctrl(struct sdio_uart_port *port,
static void sdio_uart_change_speed(struct sdio_uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned char cval, fcr = 0;
unsigned int baud, quot;
- switch (termios->c_cflag & CSIZE) {
- case CS5:
- cval = UART_LCR_WLEN5;
- break;
- case CS6:
- cval = UART_LCR_WLEN6;
- break;
- case CS7:
- cval = UART_LCR_WLEN7;
- break;
- default:
- case CS8:
- cval = UART_LCR_WLEN8;
- break;
- }
+ cval = UART_LCR_WLEN(tty_get_char_size(termios->c_cflag));
if (termios->c_cflag & CSTOPB)
cval |= UART_LCR_STOP;
@@ -367,15 +352,13 @@ static void sdio_uart_stop_rx(struct sdio_uart_port *port)
sdio_out(port, UART_IER, port->ier);
}
-static void sdio_uart_receive_chars(struct sdio_uart_port *port,
- unsigned int *status)
+static void sdio_uart_receive_chars(struct sdio_uart_port *port, u8 *status)
{
- unsigned int ch, flag;
int max_count = 256;
do {
- ch = sdio_in(port, UART_RX);
- flag = TTY_NORMAL;
+ u8 ch = sdio_in(port, UART_RX);
+ u8 flag = TTY_NORMAL;
port->icount.rx++;
if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
@@ -462,8 +445,8 @@ static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
{
- int status;
struct tty_struct *tty;
+ u8 status;
status = sdio_in(port, UART_MSR);
@@ -488,16 +471,16 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
port->icount.cts++;
tty = tty_port_tty_get(&port->port);
if (tty && C_CRTSCTS(tty)) {
- int cts = (status & UART_MSR_CTS);
+ bool cts = status & UART_MSR_CTS;
if (tty->hw_stopped) {
if (cts) {
- tty->hw_stopped = 0;
+ tty->hw_stopped = false;
sdio_uart_start_tx(port);
tty_wakeup(tty);
}
} else {
if (!cts) {
- tty->hw_stopped = 1;
+ tty->hw_stopped = true;
sdio_uart_stop_tx(port);
}
}
@@ -512,7 +495,7 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
static void sdio_uart_irq(struct sdio_func *func)
{
struct sdio_uart_port *port = sdio_get_drvdata(func);
- unsigned int iir, lsr;
+ u8 iir, lsr;
/*
* In a few places sdio_uart_irq() is called directly instead of
@@ -539,7 +522,7 @@ static void sdio_uart_irq(struct sdio_func *func)
port->in_sdio_uart_irq = NULL;
}
-static int uart_carrier_raised(struct tty_port *tport)
+static bool uart_carrier_raised(struct tty_port *tport)
{
struct sdio_uart_port *port =
container_of(tport, struct sdio_uart_port, port);
@@ -548,28 +531,27 @@ static int uart_carrier_raised(struct tty_port *tport)
return 1;
ret = sdio_uart_get_mctrl(port);
sdio_uart_release_func(port);
- if (ret & TIOCM_CAR)
- return 1;
- return 0;
+
+ return ret & TIOCM_CAR;
}
/**
* uart_dtr_rts - port helper to set uart signals
* @tport: tty port to be updated
- * @onoff: set to turn on DTR/RTS
+ * @active: set to turn on DTR/RTS
*
* Called by the tty port helpers when the modem signals need to be
* adjusted during an open, close and hangup.
*/
-static void uart_dtr_rts(struct tty_port *tport, int onoff)
+static void uart_dtr_rts(struct tty_port *tport, bool active)
{
struct sdio_uart_port *port =
container_of(tport, struct sdio_uart_port, port);
int ret = sdio_uart_claim_func(port);
if (ret)
return;
- if (onoff == 0)
+ if (!active)
sdio_uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
else
sdio_uart_set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
@@ -647,7 +629,7 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
if (C_CRTSCTS(tty))
if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS))
- tty->hw_stopped = 1;
+ tty->hw_stopped = true;
clear_bit(TTY_IO_ERROR, &tty->flags);
@@ -774,8 +756,8 @@ static void sdio_uart_hangup(struct tty_struct *tty)
tty_port_hangup(&port->port);
}
-static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
- int count)
+static ssize_t sdio_uart_write(struct tty_struct *tty, const u8 *buf,
+ size_t count)
{
struct sdio_uart_port *port = tty->driver_data;
int ret;
@@ -809,7 +791,7 @@ static unsigned int sdio_uart_chars_in_buffer(struct tty_struct *tty)
return kfifo_len(&port->xmit_fifo);
}
-static void sdio_uart_send_xchar(struct tty_struct *tty, char ch)
+static void sdio_uart_send_xchar(struct tty_struct *tty, u8 ch)
{
struct sdio_uart_port *port = tty->driver_data;
@@ -872,7 +854,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty)
}
static void sdio_uart_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct sdio_uart_port *port = tty->driver_data;
unsigned int cflag = tty->termios.c_cflag;
@@ -896,14 +878,14 @@ static void sdio_uart_set_termios(struct tty_struct *tty,
/* Handle turning off CRTSCTS */
if ((old_termios->c_cflag & CRTSCTS) && !(cflag & CRTSCTS)) {
- tty->hw_stopped = 0;
+ tty->hw_stopped = false;
sdio_uart_start_tx(port);
}
/* Handle turning on CRTSCTS */
if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) {
if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS)) {
- tty->hw_stopped = 1;
+ tty->hw_stopped = true;
sdio_uart_stop_tx(port);
}
}
@@ -1135,9 +1117,10 @@ static int __init sdio_uart_init(void)
int ret;
struct tty_driver *tty_drv;
- sdio_uart_tty_driver = tty_drv = alloc_tty_driver(UART_NR);
- if (!tty_drv)
- return -ENOMEM;
+ sdio_uart_tty_driver = tty_drv = tty_alloc_driver(UART_NR,
+ TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV);
+ if (IS_ERR(tty_drv))
+ return PTR_ERR(tty_drv);
tty_drv->driver_name = "sdio_uart";
tty_drv->name = "ttySDIO";
@@ -1145,7 +1128,6 @@ static int __init sdio_uart_init(void)
tty_drv->minor_start = 0;
tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
tty_drv->subtype = SERIAL_TYPE_NORMAL;
- tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
tty_drv->init_termios = tty_std_termios;
tty_drv->init_termios.c_cflag = B4800 | CS8 | CREAD | HUPCL | CLOCAL;
tty_drv->init_termios.c_ispeed = 4800;
@@ -1165,7 +1147,7 @@ static int __init sdio_uart_init(void)
err2:
tty_unregister_driver(tty_drv);
err1:
- put_tty_driver(tty_drv);
+ tty_driver_kref_put(tty_drv);
return ret;
}
@@ -1173,11 +1155,12 @@ static void __exit sdio_uart_exit(void)
{
sdio_unregister_driver(&sdio_uart_driver);
tty_unregister_driver(sdio_uart_tty_driver);
- put_tty_driver(sdio_uart_tty_driver);
+ tty_driver_kref_put(sdio_uart_tty_driver);
}
module_init(sdio_uart_init);
module_exit(sdio_uart_exit);
MODULE_AUTHOR("Nicolas Pitre");
+MODULE_DESCRIPTION("SDIO UART/GPS driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 05e907451df9..c5bc6268803e 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -19,10 +19,11 @@
struct mmc_gpio {
struct gpio_desc *ro_gpio;
struct gpio_desc *cd_gpio;
- irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
+ irq_handler_t cd_gpio_isr;
char *ro_label;
char *cd_label;
u32 cd_debounce_delay_ms;
+ int cd_irq;
};
static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
@@ -39,34 +40,50 @@ static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
int mmc_gpio_alloc(struct mmc_host *host)
{
- struct mmc_gpio *ctx = devm_kzalloc(host->parent,
- sizeof(*ctx), GFP_KERNEL);
-
- if (ctx) {
- ctx->cd_debounce_delay_ms = 200;
- ctx->cd_label = devm_kasprintf(host->parent, GFP_KERNEL,
- "%s cd", dev_name(host->parent));
- if (!ctx->cd_label)
- return -ENOMEM;
- ctx->ro_label = devm_kasprintf(host->parent, GFP_KERNEL,
- "%s ro", dev_name(host->parent));
- if (!ctx->ro_label)
- return -ENOMEM;
- host->slot.handler_priv = ctx;
- host->slot.cd_irq = -EINVAL;
- }
+ const char *devname = dev_name(host->parent);
+ struct mmc_gpio *ctx;
+
+ ctx = devm_kzalloc(host->parent, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->cd_debounce_delay_ms = 200;
+ ctx->cd_label = devm_kasprintf(host->parent, GFP_KERNEL, "%s cd", devname);
+ if (!ctx->cd_label)
+ return -ENOMEM;
+ ctx->ro_label = devm_kasprintf(host->parent, GFP_KERNEL, "%s ro", devname);
+ if (!ctx->ro_label)
+ return -ENOMEM;
+ ctx->cd_irq = -EINVAL;
+ host->slot.handler_priv = ctx;
+ host->slot.cd_irq = -EINVAL;
- return ctx ? 0 : -ENOMEM;
+ return 0;
}
+void mmc_gpio_set_cd_irq(struct mmc_host *host, int irq)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ if (!ctx || irq < 0)
+ return;
+
+ ctx->cd_irq = irq;
+}
+EXPORT_SYMBOL(mmc_gpio_set_cd_irq);
+
int mmc_gpio_get_ro(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
+ int cansleep;
if (!ctx || !ctx->ro_gpio)
return -ENOSYS;
- return gpiod_get_value_cansleep(ctx->ro_gpio);
+ cansleep = gpiod_cansleep(ctx->ro_gpio);
+ return cansleep ?
+ gpiod_get_value_cansleep(ctx->ro_gpio) :
+ gpiod_get_value(ctx->ro_gpio);
}
EXPORT_SYMBOL(mmc_gpio_get_ro);
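The two additions in this hunk work together: mmc_gpio_set_cd_irq() lets a host driver hand the slot code a dedicated card-detect interrupt, which mmc_gpiod_request_cd_irq() will then prefer over gpiod_to_irq() on the CD GPIO, while mmc_gpio_get_ro() now picks gpiod_get_value() or gpiod_get_value_cansleep() depending on gpiod_cansleep(). A hedged probe-time fragment showing the new setter (the example_* names and the "card-detect" IRQ resource are assumptions for illustration; the CD GPIO itself is still requested through mmc_gpiod_request_cd()):

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/platform_device.h>

static int example_host_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	int cd_irq;

	mmc = devm_mmc_alloc_host(&pdev->dev, 0);
	if (!mmc)
		return -ENOMEM;

	/* Controller has its own card-detect line, separate from the CD GPIO. */
	cd_irq = platform_get_irq_byname_optional(pdev, "card-detect");
	if (cd_irq >= 0)
		mmc_gpio_set_cd_irq(mmc, cd_irq);

	/* The stored IRQ is used when mmc_gpiod_request_cd_irq() arms detection. */
	return mmc_add_host(mmc);
}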
@@ -98,7 +115,9 @@ void mmc_gpiod_request_cd_irq(struct mmc_host *host)
* Do not use IRQ if the platform prefers to poll, e.g., because that
* IRQ number is already used by another unit and cannot be shared.
*/
- if (!(host->caps & MMC_CAP_NEEDS_POLL))
+ if (ctx->cd_irq >= 0)
+ irq = ctx->cd_irq;
+ else if (!(host->caps & MMC_CAP_NEEDS_POLL))
irq = gpiod_to_irq(ctx->cd_gpio);
if (irq >= 0) {
@@ -140,19 +159,6 @@ int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on)
}
EXPORT_SYMBOL(mmc_gpio_set_cd_wake);
-/* Register an alternate interrupt service routine for
- * the card-detect GPIO.
- */
-void mmc_gpio_set_cd_isr(struct mmc_host *host,
- irqreturn_t (*isr)(int irq, void *dev_id))
-{
- struct mmc_gpio *ctx = host->slot.handler_priv;
-
- WARN_ON(ctx->cd_gpio_isr);
- ctx->cd_gpio_isr = isr;
-}
-EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
-
/**
* mmc_gpiod_request_cd - request a gpio descriptor for card-detection
* @host: mmc host
@@ -178,6 +184,10 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
if (IS_ERR(desc))
return PTR_ERR(desc);
+ /* Update default label if no con_id provided */
+ if (!con_id)
+ gpiod_set_consumer_name(desc, ctx->cd_label);
+
if (debounce) {
ret = gpiod_set_debounce(desc, debounce);
if (ret < 0)
@@ -198,13 +208,33 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
}
EXPORT_SYMBOL(mmc_gpiod_request_cd);
-bool mmc_can_gpio_cd(struct mmc_host *host)
+/**
+ * mmc_gpiod_set_cd_config - set config for card-detection GPIO
+ * @host: mmc host
+ * @config: Generic pinconf config (from pinconf_to_config_packed())
+ *
+ * This can be used by mmc host drivers to fixup a card-detection GPIO's config
+ * (e.g. set PIN_CONFIG_BIAS_PULL_UP) after acquiring the GPIO descriptor
+ * through mmc_gpiod_request_cd().
+ *
+ * Returns:
+ * 0 on success, or a negative errno value on error.
+ */
+int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ return gpiod_set_config(ctx->cd_gpio, config);
+}
+EXPORT_SYMBOL(mmc_gpiod_set_cd_config);
+
+bool mmc_host_can_gpio_cd(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
return ctx->cd_gpio ? true : false;
}
-EXPORT_SYMBOL(mmc_can_gpio_cd);
+EXPORT_SYMBOL(mmc_host_can_gpio_cd);
/**
* mmc_gpiod_request_ro - request a gpio descriptor for write protection
@@ -226,6 +256,10 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
if (IS_ERR(desc))
return PTR_ERR(desc);
+ /* Update default label if no con_id provided */
+ if (!con_id)
+ gpiod_set_consumer_name(desc, ctx->ro_label);
+
if (debounce) {
ret = gpiod_set_debounce(desc, debounce);
if (ret < 0)
@@ -241,10 +275,10 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
}
EXPORT_SYMBOL(mmc_gpiod_request_ro);
-bool mmc_can_gpio_ro(struct mmc_host *host)
+bool mmc_host_can_gpio_ro(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
return ctx->ro_gpio ? true : false;
}
-EXPORT_SYMBOL(mmc_can_gpio_ro);
+EXPORT_SYMBOL(mmc_host_can_gpio_ro);
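The new mmc_gpiod_set_cd_config() simply forwards a packed generic pinconf value to the card-detect descriptor, so a host driver can repair the pad configuration after the GPIO has been acquired. A small sketch, assuming a hypothetical board where firmware leaves the CD pad floating and a pull-up must be forced (the example_* name is a placeholder):

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/pinctrl/pinconf-generic.h>

static void example_fixup_cd_pull_up(struct mmc_host *mmc)
{
	unsigned long cfg = pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP, 1);
	int ret;

	/* Only valid once the CD GPIO was requested via mmc_gpiod_request_cd(). */
	ret = mmc_gpiod_set_cd_config(mmc, cfg);
	if (ret)
		dev_warn(mmc_dev(mmc), "could not set CD pull-up: %d\n", ret);
}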
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 71313961cc54..24f07df32a1a 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -14,6 +14,15 @@ config MMC_DEBUG
added host drivers please don't invent their private macro for
debugging.
+config MMC_SUNPLUS
+ tristate "Sunplus SP7021 MMC Controller"
+ depends on ARCH_SUNPLUS || COMPILE_TEST
+ help
+ If you say yes here, you will get support for eMMC host interface
+ on Sunplus SoCs.
+
+ If unsure, say N
+
config MMC_ARMMMCI
tristate "ARM AMBA Multimedia Card Interface support"
depends on ARM_AMBA
@@ -47,7 +56,7 @@ config MMC_STM32_SDMMC
config MMC_PXA
tristate "Intel PXA25x/26x/27x Multimedia Card Interface support"
- depends on ARCH_PXA
+ depends on ARCH_PXA || COMPILE_TEST
help
This selects the Intel(R) PXA(R) Multimedia card Interface.
If you have a PXA(R) platform with a Multimedia Card slot,
@@ -89,10 +98,20 @@ config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
This is the case for the Nintendo Wii SDHCI.
+config MMC_SDHCI_UHS2
+ tristate "UHS2 support on SDHCI controller" if COMPILE_TEST
+ depends on MMC_SDHCI
+ help
+ This option is selected by SDHCI controller drivers that want to
+ support UHS2-capable devices.
+
+ If you have a controller with this feature, say Y or M here.
+
config MMC_SDHCI_PCI
tristate "SDHCI support on PCI bus"
depends on MMC_SDHCI && PCI
select MMC_CQHCI
+ select MMC_SDHCI_UHS2
select IOSF_MBI if X86
select MMC_SDHCI_IO_ACCESSORS
help
@@ -157,6 +176,7 @@ config MMC_SDHCI_OF_ARASAN
config MMC_SDHCI_OF_ASPEED
tristate "SDHCI OF support for the ASPEED SDHCI controller"
+ depends on ARCH_ASPEED || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
depends on OF && OF_ADDRESS
select MMC_SDHCI_IO_ACCESSORS
@@ -169,8 +189,10 @@ config MMC_SDHCI_OF_ASPEED
If unsure, say N.
config MMC_SDHCI_OF_ASPEED_TEST
- bool "Tests for the ASPEED SDHCI driver"
- depends on MMC_SDHCI_OF_ASPEED && KUNIT=y
+ bool "Tests for the ASPEED SDHCI driver" if !KUNIT_ALL_TESTS
+ depends on MMC_SDHCI_OF_ASPEED && KUNIT
+ depends on (MMC_SDHCI_OF_ASPEED=m || KUNIT=y)
+ default KUNIT_ALL_TESTS
help
Enable KUnit tests for the ASPEED SDHCI driver. Select this
option only if you will boot the kernel for the purpose of running
@@ -221,12 +243,27 @@ config MMC_SDHCI_OF_DWCMSHC
depends on MMC_SDHCI_PLTFM
depends on OF
depends on COMMON_CLK
+ select MMC_CQHCI
help
This selects Synopsys DesignWare Cores Mobile Storage Controller
support.
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+config MMC_SDHCI_OF_K1
+ tristate "SDHCI OF support for the SpacemiT K1 SoC"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ depends on MMC_SDHCI_PLTFM
+ depends on OF
+ depends on COMMON_CLK
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ found in the SpacemiT K1 SoC.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_OF_SPARX5
tristate "SDHCI OF support for the MCHP Sparx5 SoC"
depends on MMC_SDHCI_PLTFM
@@ -239,23 +276,25 @@ config MMC_SDHCI_OF_SPARX5
If unsure, say N.
-config MMC_SDHCI_CADENCE
- tristate "SDHCI support for the Cadence SD/SDIO/eMMC controller"
+config MMC_SDHCI_OF_MA35D1
+ tristate "SDHCI OF support for the MA35D1 SDHCI controller"
+ depends on ARCH_MA35 || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
- depends on OF
help
- This selects the Cadence SD/SDIO/eMMC driver.
+ This selects the MA35D1 Secure Digital Host Controller Interface.
+ The controller supports SD/MMC/SDIO devices.
If you have a controller with this interface, say Y or M here.
If unsure, say N.
-config MMC_SDHCI_CNS3XXX
- tristate "SDHCI support on the Cavium Networks CNS3xxx SoC"
- depends on ARCH_CNS3XXX || COMPILE_TEST
+config MMC_SDHCI_CADENCE
+ tristate "SDHCI support for the Cadence SD/SDIO/eMMC controller"
depends on MMC_SDHCI_PLTFM
+ depends on OF
+ select MMC_SDHCI_IO_ACCESSORS
help
- This selects the SDHCI support for CNS3xxx System-on-Chip devices.
+ This selects the Cadence SD/SDIO/eMMC driver.
If you have a controller with this interface, say Y or M here.
@@ -315,15 +354,16 @@ config MMC_SDHCI_TEGRA
If unsure, say N.
config MMC_SDHCI_S3C
- tristate "SDHCI support on Samsung S3C SoC"
+ tristate "SDHCI support on Samsung S3C/S5P/Exynos SoC"
depends on MMC_SDHCI
depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
help
This selects the Secure Digital Host Controller Interface (SDHCI)
- often referrered to as the HSMMC block in some of the Samsung S3C
- range of SoC.
+ often referred to as the HSMMC block in some of the Samsung
+ S3C6410, S5Pv210 and Exynos (Exynos4210, Exynos4412) SoCs.
- If you have a controller with this interface, say Y or M here.
+ If you have a controller with this interface (therefore you build for
+ such a Samsung SoC), say Y or M here.
If unsure, say N.
@@ -346,6 +386,7 @@ config MMC_SDHCI_PXAV2
depends on MMC_SDHCI_PLTFM
depends on ARCH_MMP || COMPILE_TEST
default CPU_PXA910
+ select MMC_SDHCI_IO_ACCESSORS
help
This selects the Marvell(R) PXAV2 SD Host Controller.
If you have a PXA9XX platform with SD Host Controller
@@ -360,7 +401,7 @@ config MMC_SDHCI_SPEAR
depends on OF
help
This selects the Secure Digital Host Controller Interface (SDHCI)
- often referrered to as the HSMMC block in some of the ST SPEAR range
+ often referred to as the HSMMC block in some of the ST SPEAR range
of SoC
If you have a controller with this interface, say Y or M here.
@@ -425,6 +466,14 @@ config MMC_SDHCI_IPROC
If unsure, say N.
+config MMC_SDHCI_NPCM
+ tristate "Secure Digital Host Controller Interface support for NPCM"
+ depends on ARCH_NPCM || COMPILE_TEST
+ depends on MMC_SDHCI_PLTFM
+ help
+ This provides support for the SD/eMMC controller found in
+ NPCM BMC family SoCs.
+
config MMC_MESON_GX
tristate "Amlogic S905/GX*/AXG SD/MMC Host Controller support"
depends on ARCH_MESON || COMPILE_TEST
@@ -455,6 +504,7 @@ config MMC_MESON_MX_SDIO
depends on ARCH_MESON || COMPILE_TEST
depends on COMMON_CLK
depends on OF_ADDRESS
+ select REGMAP_MMIO
help
This selects support for the SD/MMC Host Controller on
Amlogic Meson6, Meson8 and Meson8b SoCs.
@@ -486,7 +536,6 @@ config MMC_SDHCI_ST
config MMC_OMAP
tristate "TI OMAP Multimedia Card Interface support"
depends on ARCH_OMAP
- depends on TPS65010 || !MACH_OMAP_H2
help
This selects the TI OMAP Multimedia card Interface.
If you have an OMAP board with a Multimedia Card slot,
@@ -523,11 +572,12 @@ config MMC_ALCOR
of Alcor Micro PCI-E card reader
config MMC_AU1X
- tristate "Alchemy AU1XX0 MMC Card Interface support"
+ bool "Alchemy AU1XX0 MMC Card Interface support"
depends on MIPS_ALCHEMY
+ depends on MMC=y
help
This selects the AMD Alchemy(R) Multimedia card interface.
- If you have a Alchemy platform with a MMC slot, say Y or M here.
+ If you have an Alchemy platform with an MMC slot, say Y here.
If unsure, say N.
@@ -547,7 +597,7 @@ config MMC_SDHCI_MSM
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
select MMC_CQHCI
- select QCOM_SCM if MMC_CRYPTO && ARCH_QCOM
+ select QCOM_INLINE_CRYPTO_ENGINE if MMC_CRYPTO
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in Qualcomm SOCs. The controller supports
@@ -559,7 +609,7 @@ config MMC_SDHCI_MSM
config MMC_MXC
tristate "Freescale i.MX21/27/31 or MPC512x Multimedia Card support"
- depends on ARCH_MXC || PPC_MPC512x
+ depends on ARCH_MXC || PPC_MPC512x || COMPILE_TEST
help
This selects the Freescale i.MX21, i.MX27, i.MX31 or MPC512x
Multimedia Card Interface. If you have an i.MX or MPC512x platform
@@ -626,49 +676,6 @@ config MMC_SPI
If unsure, or if your system has no SPI master driver, say N.
-config MMC_S3C
- tristate "Samsung S3C SD/MMC Card Interface support"
- depends on ARCH_S3C24XX || COMPILE_TEST
- depends on S3C24XX_DMAC || COMPILE_TEST
- help
- This selects a driver for the MCI interface found in
- Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs.
- If you have a board based on one of those and a MMC/SD
- slot, say Y or M here.
-
- If unsure, say N.
-
-config MMC_S3C_HW_SDIO_IRQ
- bool "Hardware support for SDIO IRQ"
- depends on MMC_S3C
- help
- Enable the hardware support for SDIO interrupts instead of using
- the generic polling code.
-
-choice
- prompt "Samsung S3C SD/MMC transfer code"
- depends on MMC_S3C
-
-config MMC_S3C_PIO
- bool "Use PIO transfers only"
- help
- Use PIO to transfer data between memory and the hardware.
-
- PIO is slower than DMA as it requires CPU instructions to
- move the data. This has been the traditional default for
- the S3C MCI driver.
-
-config MMC_S3C_DMA
- bool "Use DMA transfers only"
- help
- Use DMA to transfer data between memory and the hardware.
-
- Currently, the DMA support in this driver seems to not be
- working properly and needs to be debugged before this
- option is useful.
-
-endchoice
-
config MMC_SDRICOH_CS
tristate "MMC/SD driver for Ricoh Bay1Controllers"
depends on PCI && PCMCIA
@@ -696,19 +703,11 @@ config MMC_SDHCI_SPRD
config MMC_TMIO_CORE
tristate
-config MMC_TMIO
- tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support"
- depends on MFD_TMIO || MFD_ASIC3 || COMPILE_TEST
- select MMC_TMIO_CORE
- help
- This provides support for the SD/MMC cell found in TC6393XB,
- T7L66XB and also HTC ASIC3
-
config MMC_SDHI
tristate "Renesas SDHI SD/SDIO controller support"
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
+ depends on (RESET_CONTROLLER && REGULATOR) || !OF
select MMC_TMIO_CORE
- select RESET_CONTROLLER if ARCH_RENESAS
help
This provides support for the SDHI SD/SDIO controller found in
Renesas SuperH, ARM and ARM64 based SoCs
@@ -724,9 +723,9 @@ config MMC_SDHI_SYS_DMAC
config MMC_SDHI_INTERNAL_DMAC
tristate "DMA for SDHI SD/SDIO controllers using on-chip bus mastering"
- depends on ARM64 || ARCH_R7S9210 || ARCH_R8A77470 || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
depends on MMC_SDHI
- default MMC_SDHI if (ARM64 || ARCH_R7S9210 || ARCH_R8A77470)
+ default MMC_SDHI if ARCH_RENESAS
help
This provides DMA support for SDHI SD/SDIO controllers
using on-chip bus mastering. This supports the controllers
@@ -837,6 +836,15 @@ config MMC_DW_HI3798CV200
Synopsys DesignWare Memory Card Interface driver. Select this option
for platforms based on HiSilicon Hi3798CV200 SoC.
+config MMC_DW_HI3798MV200
+ tristate "Hi3798MV200 specific extensions for Synopsys DW Memory Card Interface"
+ depends on MMC_DW
+ select MMC_DW_PLTFM
+ help
+ This selects support for HiSilicon Hi3798MV200 SoC specific extensions to the
+ Synopsys DesignWare Memory Card Interface driver. Select this option
+ for platforms based on HiSilicon Hi3798MV200 SoC.
+
config MMC_DW_K3
tristate "K3 specific extensions for Synopsys DW Memory Card Interface"
depends on MMC_DW
@@ -859,13 +867,24 @@ config MMC_DW_PCI
config MMC_DW_ROCKCHIP
tristate "Rockchip specific extensions for Synopsys DW Memory Card Interface"
- depends on MMC_DW && ARCH_ROCKCHIP
+ depends on MMC_DW
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
select MMC_DW_PLTFM
help
This selects support for Rockchip SoC specific extensions to the
Synopsys DesignWare Memory Card Interface driver. Select this option
for platforms based on RK3066, RK3188 and RK3288 SoC's.
+config MMC_DW_STARFIVE
+ tristate "StarFive specific extensions for Synopsys DW Memory Card Interface"
+ depends on SOC_STARFIVE
+ depends on MMC_DW
+ select MMC_DW_PLTFM
+ help
+ This selects support for StarFive JH7110 SoC specific extensions to the
+ Synopsys DesignWare Memory Card Interface driver. Select this option
+ for platforms based on StarFive JH7110 SoC.
+
config MMC_SH_MMCIF
tristate "SuperH Internal MMCIF support"
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
@@ -931,8 +950,8 @@ config MMC_USHC
config MMC_WMT
tristate "Wondermedia SD/MMC Host Controller support"
- depends on ARCH_VT8500
- default y
+ depends on ARCH_VT8500 || COMPILE_TEST
+ default ARCH_VT8500
help
This selects support for the SD/MMC Host Controller on
Wondermedia WM8505/WM8650 based SoCs.
@@ -964,6 +983,7 @@ config MMC_REALTEK_USB
config MMC_SUNXI
tristate "Allwinner sunxi SD/MMC Host Controller support"
depends on ARCH_SUNXI || COMPILE_TEST
+ depends on SUNXI_CCU
help
This selects support for the SD/MMC Host Controller on
Allwinner sunxi SoCs.
@@ -1015,6 +1035,7 @@ config MMC_MTK
depends on COMMON_CLK
select REGULATOR
select MMC_CQHCI
+ select MMC_HSQ
help
This selects the MediaTek(R) Secure digital and Multimedia card Interface.
If you have a machine with an integrated SD/MMC card reader, say Y or M here.
@@ -1034,10 +1055,10 @@ config MMC_SDHCI_MICROCHIP_PIC32
config MMC_SDHCI_BRCMSTB
tristate "Broadcom SDIO/SD/MMC support"
- depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ depends on ARCH_BRCMSTB || ARCH_BCM2835 || BMIPS_GENERIC || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
select MMC_CQHCI
- default y
+ default ARCH_BRCMSTB || BMIPS_GENERIC
help
This selects support for the SDIO/SD/MMC Host Controller on
Broadcom STB SoCs.
@@ -1054,14 +1075,15 @@ config MMC_SDHCI_XENON
config MMC_SDHCI_OMAP
tristate "TI SDHCI Controller Support"
+ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
depends on MMC_SDHCI_PLTFM && OF
select THERMAL
imply TI_SOC_THERMAL
select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE
help
This selects the Secure Digital Host Controller Interface (SDHCI)
- support present in TI's DRA7 SOCs. The controller supports
- SD/MMC/SDIO devices.
+ support present in TI's Keystone/OMAP2+/DRA7 SOCs. The controller
+ supports SD/MMC/SDIO devices.
If you have a controller with this interface, say Y or M here.
@@ -1069,13 +1091,15 @@ config MMC_SDHCI_OMAP
config MMC_SDHCI_AM654
tristate "Support for the SDHCI Controller in TI's AM654 SOCs"
- depends on MMC_SDHCI_PLTFM && OF && REGMAP_MMIO
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on MMC_SDHCI_PLTFM && OF
select MMC_SDHCI_IO_ACCESSORS
select MMC_CQHCI
+ select REGMAP_MMIO
help
This selects the Secure Digital Host Controller Interface (SDHCI)
- support present in TI's AM654 SOCs. The controller supports
- SD/MMC/SDIO devices.
+ support present in TI's AM65x/AM64x/AM62x/J721E SOCs. The controller
+ supports SD/MMC/SDIO devices.
If you have a controller with this interface, say Y or M here.
@@ -1089,5 +1113,32 @@ config MMC_OWL
This selects support for the SD/MMC Host Controller on
Actions Semi Owl SoCs.
+config MMC_LOONGSON2
+ tristate "Loongson-2K SD/SDIO/eMMC Host Interface support"
+ depends on LOONGARCH || COMPILE_TEST
+ depends on HAS_DMA
+ select REGMAP_MMIO
+ help
+ This selects support for the SD/SDIO/eMMC Host Controller on
+ Loongson-2K series CPUs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mmc_loongson2.
+
+ If unsure, say N.
+
config MMC_SDHCI_EXTERNAL_DMA
bool
+
+config MMC_LITEX
+ tristate "LiteX MMC Host Controller support"
+ depends on ((PPC_MICROWATT || LITEX) && OF && HAVE_CLK) || COMPILE_TEST
+ select REGULATOR
+ select REGULATOR_FIXED_VOLTAGE
+ help
+ This selects support for the MMC Host Controller found in LiteX SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called litex_mmc.
+
+ If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 14004cc09aaa..5057fea8afb6 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -11,10 +11,10 @@ obj-$(CONFIG_MMC_PXA) += pxamci.o
obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
+obj-$(CONFIG_MMC_SDHCI_UHS2) += sdhci-uhs2.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o sdhci-pci-arasan.o \
sdhci-pci-dwc-mshc.o sdhci-pci-gli.o
-obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
@@ -35,9 +35,7 @@ obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o
obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o
obj-$(CONFIG_MMC_SPI) += mmc_spi.o
obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o
-obj-$(CONFIG_MMC_S3C) += s3cmci.o
obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
-obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_core.o
obj-$(CONFIG_MMC_SDHI_SYS_DMAC) += renesas_sdhi_sys_dmac.o
@@ -54,9 +52,11 @@ obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o
obj-$(CONFIG_MMC_DW_BLUEFIELD) += dw_mmc-bluefield.o
obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o
obj-$(CONFIG_MMC_DW_HI3798CV200) += dw_mmc-hi3798cv200.o
+obj-$(CONFIG_MMC_DW_HI3798MV200) += dw_mmc-hi3798mv200.o
obj-$(CONFIG_MMC_DW_K3) += dw_mmc-k3.o
obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o
obj-$(CONFIG_MMC_DW_ROCKCHIP) += dw_mmc-rockchip.o
+obj-$(CONFIG_MMC_DW_STARFIVE) += dw_mmc-starfive.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
obj-$(CONFIG_MMC_VUB300) += vub300.o
@@ -72,13 +72,13 @@ obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o
obj-$(CONFIG_MMC_TOSHIBA_PCI) += toshsd.o
obj-$(CONFIG_MMC_BCM2835) += bcm2835.o
obj-$(CONFIG_MMC_OWL) += owl-mmc.o
+obj-$(CONFIG_MMC_LOONGSON2) += loongson2-mmc.o
obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
obj-$(CONFIG_MMC_SDHCI_CADENCE) += sdhci-cadence.o
-obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
obj-$(CONFIG_MMC_SDHCI_ESDHC_MCF) += sdhci-esdhc-mcf.o
obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
@@ -89,19 +89,24 @@ obj-$(CONFIG_MMC_SDHCI_OF_AT91) += sdhci-of-at91.o
obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
obj-$(CONFIG_MMC_SDHCI_OF_DWCMSHC) += sdhci-of-dwcmshc.o
+obj-$(CONFIG_MMC_SDHCI_OF_K1) += sdhci-of-k1.o
obj-$(CONFIG_MMC_SDHCI_OF_SPARX5) += sdhci-of-sparx5.o
+obj-$(CONFIG_MMC_SDHCI_OF_MA35D1) += sdhci-of-ma35d1.o
obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o
+obj-$(CONFIG_MMC_SDHCI_NPCM) += sdhci-npcm.o
obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o
obj-$(CONFIG_MMC_SDHCI_SPRD) += sdhci-sprd.o
+obj-$(CONFIG_MMC_SUNPLUS) += sunplus-mmc.o
obj-$(CONFIG_MMC_CQHCI) += cqhci.o
cqhci-y += cqhci-core.o
cqhci-$(CONFIG_MMC_CRYPTO) += cqhci-crypto.o
obj-$(CONFIG_MMC_HSQ) += mmc_hsq.o
+obj-$(CONFIG_MMC_LITEX) += litex_mmc.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
index bfb8efeb7eb8..721db54739c1 100644
--- a/drivers/mmc/host/alcor.c
+++ b/drivers/mmc/host/alcor.c
@@ -20,6 +20,7 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
@@ -208,7 +209,7 @@ static void alcor_trf_block_pio(struct alcor_sdmmc_host *host, bool read)
len = min(host->sg_miter.length, blksize);
dev_dbg(host->dev, "PIO, %s block size: 0x%zx\n",
- read ? "read" : "write", blksize);
+ str_read_write(read), blksize);
host->sg_miter.consumed = len;
host->blocks--;
@@ -1083,7 +1084,7 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
struct alcor_sdmmc_host *host;
int ret;
- mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host));
if (!mmc) {
dev_err(&pdev->dev, "Can't allocate MMC\n");
return -ENOMEM;
@@ -1101,11 +1102,9 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(&pdev->dev, priv->irq,
alcor_irq, alcor_irq_thread, IRQF_SHARED,
DRV_NAME_ALCOR_PCI_SDMMC, host);
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to get irq for data line\n");
- goto free_host;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to get irq for data line\n");
mutex_init(&host->cmd_mutex);
INIT_DELAYED_WORK(&host->timeout_work, alcor_timeout_timer);
@@ -1114,15 +1113,11 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
alcor_hw_init(host);
dev_set_drvdata(&pdev->dev, host);
- mmc_add_host(mmc);
- return 0;
-free_host:
- mmc_free_host(mmc);
- return ret;
+ return mmc_add_host(mmc);
}
-static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
+static void alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
{
struct alcor_sdmmc_host *host = dev_get_drvdata(&pdev->dev);
struct mmc_host *mmc = mmc_from_priv(host);
@@ -1132,12 +1127,8 @@ static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
alcor_hw_uninit(host);
mmc_remove_host(mmc);
- mmc_free_host(mmc);
-
- return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int alcor_pci_sdmmc_suspend(struct device *dev)
{
struct alcor_sdmmc_host *host = dev_get_drvdata(dev);
@@ -1158,10 +1149,9 @@ static int alcor_pci_sdmmc_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(alcor_mmc_pm_ops, alcor_pci_sdmmc_suspend,
- alcor_pci_sdmmc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(alcor_mmc_pm_ops, alcor_pci_sdmmc_suspend,
+ alcor_pci_sdmmc_resume);
static const struct platform_device_id alcor_pci_sdmmc_ids[] = {
{
@@ -1179,7 +1169,7 @@ static struct platform_driver alcor_pci_sdmmc_driver = {
.driver = {
.name = DRV_NAME_ALCOR_PCI_SDMMC,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &alcor_mmc_pm_ops
+ .pm = pm_sleep_ptr(&alcor_mmc_pm_ops),
},
};
module_platform_driver(alcor_pci_sdmmc_driver);
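The alcor changes above follow the usual modernization recipe: devm_mmc_alloc_host() removes the mmc_free_host() error path, dev_err_probe() folds the error message into the return value, the remove callback returns void, and DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() replace the CONFIG_PM_SLEEP ifdefs. A condensed sketch of that shape under hypothetical names (the example_* identifiers, IRQ handler and PM callbacks are placeholders, not alcor code):

#include <linux/interrupt.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

struct example_host {
	int irq;
};

static irqreturn_t example_irq(int irq, void *data)
{
	return IRQ_HANDLED;			/* placeholder handler */
}

static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev)  { return 0; }

static int example_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct example_host *host;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Freed automatically on detach; no mmc_free_host() error path. */
	mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host));
	if (!mmc)
		return -ENOMEM;
	host = mmc_priv(mmc);
	host->irq = irq;

	ret = devm_request_irq(&pdev->dev, irq, example_irq, 0,
			       dev_name(&pdev->dev), host);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "failed to request IRQ\n");

	platform_set_drvdata(pdev, host);
	return mmc_add_host(mmc);
}

static void example_remove(struct platform_device *pdev)	/* void, not int */
{
	struct example_host *host = platform_get_drvdata(pdev);

	mmc_remove_host(mmc_from_priv(host));
}

/* Built unconditionally; pm_sleep_ptr() discards it when !CONFIG_PM_SLEEP. */
static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name	= "example-mmc",
		.pm	= pm_sleep_ptr(&example_pm_ops),
	},
};
module_platform_driver(example_driver);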
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 807177c953f3..fdf6926ea468 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -11,15 +11,14 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/irq.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
@@ -30,15 +29,18 @@
#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>
-#include <linux/atmel-mci.h>
#include <linux/atmel_pdc.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/workqueue.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
+#include <linux/string_choices.h>
+
+#define ATMCI_MAX_NR_SLOTS 2
/*
* Superset of MCI IP registers integrated in Atmel AT91 Processor
@@ -201,6 +203,29 @@ enum atmci_pdc_buf {
PDC_SECOND_BUF,
};
+/**
+ * struct mci_slot_pdata - board-specific per-slot configuration
+ * @bus_width: Number of data lines wired up the slot
+ * @detect_pin: GPIO pin wired to the card detect switch
+ * @wp_pin: GPIO pin wired to the write protect sensor
+ * @non_removable: The slot is not removable, only detect once
+ *
+ * If a given slot is not present on the board, @bus_width should be
+ * set to 0. The other fields are ignored in this case.
+ *
+ * Any pins that aren't available should be set to a negative value.
+ *
+ * Note that support for multiple slots is experimental -- some cards
+ * might get upset if we don't get the clock management exactly right.
+ * But in most cases, it should work just fine.
+ */
+struct mci_slot_pdata {
+ unsigned int bus_width;
+ struct gpio_desc *detect_pin;
+ struct gpio_desc *wp_pin;
+ bool non_removable;
+};
+
struct atmel_mci_caps {
bool has_dma_conf_reg;
bool has_pdc;
@@ -249,12 +274,12 @@ struct atmel_mci_dma {
* EVENT_DATA_ERROR is pending.
* @stop_cmdr: Value to be loaded into CMDR when the stop command is
* to be sent.
- * @tasklet: Tasklet running the request state machine.
+ * @bh_work: Work running the request state machine.
* @pending_events: Bitmask of events flagged by the interrupt handler
- * to be processed by the tasklet.
+ * to be processed by the work.
* @completed_events: Bitmask of events which the state machine has
* processed.
- * @state: Tasklet state.
+ * @state: Work state.
* @queue: List of slots waiting for access to the controller.
* @need_clock_update: Update the clock rate before the next request.
* @need_reset: Reset controller before next request.
@@ -265,7 +290,8 @@ struct atmel_mci_dma {
* rate and timeout calculations.
* @mapbase: Physical address of the MMIO registers.
* @mck: The peripheral bus clock hooked up to the MMC controller.
- * @pdev: Platform device associated with the MMC controller.
+ * @dev: Device associated with the MMC controller.
+ * @pdata: Per-slot configuration data.
* @slot: Slots sharing this MMC controller.
* @caps: MCI capabilities depending on MCI version.
* @prepare_data: function to setup MCI before data transfer which
@@ -328,7 +354,7 @@ struct atmel_mci {
u32 data_status;
u32 stop_cmdr;
- struct tasklet_struct tasklet;
+ struct work_struct bh_work;
unsigned long pending_events;
unsigned long completed_events;
enum atmel_mci_state state;
@@ -342,8 +368,9 @@ struct atmel_mci {
unsigned long bus_hz;
unsigned long mapbase;
struct clk *mck;
- struct platform_device *pdev;
+ struct device *dev;
+ struct mci_slot_pdata pdata[ATMCI_MAX_NR_SLOTS];
struct atmel_mci_slot *slot[ATMCI_MAX_NR_SLOTS];
struct atmel_mci_caps caps;
@@ -369,7 +396,6 @@ struct atmel_mci {
* available.
* @wp_pin: GPIO pin used for card write protect sending, or negative
* if not available.
- * @detect_is_active_high: The state of the detect pin when it is active.
* @detect_timer: Timer used for debouncing @detect_pin interrupts.
*/
struct atmel_mci_slot {
@@ -388,9 +414,8 @@ struct atmel_mci_slot {
#define ATMCI_CARD_NEED_INIT 1
#define ATMCI_SHUTDOWN 2
- int detect_pin;
- int wp_pin;
- bool detect_is_active_high;
+ struct gpio_desc *detect_pin;
+ struct gpio_desc *wp_pin;
struct timer_list detect_timer;
};
@@ -497,6 +522,7 @@ static void atmci_show_status_reg(struct seq_file *s,
static int atmci_regs_show(struct seq_file *s, void *v)
{
struct atmel_mci *host = s->private;
+ struct device *dev = host->dev;
u32 *buf;
int ret = 0;
@@ -505,7 +531,7 @@ static int atmci_regs_show(struct seq_file *s, void *v)
if (!buf)
return -ENOMEM;
- pm_runtime_get_sync(&host->pdev->dev);
+ pm_runtime_get_sync(dev);
/*
* Grab a more or less consistent snapshot. Note that we're
@@ -516,8 +542,7 @@ static int atmci_regs_show(struct seq_file *s, void *v)
memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
spin_unlock_bh(&host->lock);
- pm_runtime_mark_last_busy(&host->pdev->dev);
- pm_runtime_put_autosuspend(&host->pdev->dev);
+ pm_runtime_put_autosuspend(dev);
seq_printf(s, "MR:\t0x%08x%s%s ",
buf[ATMCI_MR / 4],
@@ -584,16 +609,15 @@ static void atmci_init_debugfs(struct atmel_mci_slot *slot)
if (!root)
return;
- debugfs_create_file("regs", S_IRUSR, root, host, &atmci_regs_fops);
- debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
- debugfs_create_u32("state", S_IRUSR, root, &host->state);
- debugfs_create_xul("pending_events", S_IRUSR, root,
+ debugfs_create_file("regs", 0400, root, host, &atmci_regs_fops);
+ debugfs_create_file("req", 0400, root, slot, &atmci_req_fops);
+ debugfs_create_u32("state", 0400, root, &host->state);
+ debugfs_create_xul("pending_events", 0400, root,
&host->pending_events);
- debugfs_create_xul("completed_events", S_IRUSR, root,
+ debugfs_create_xul("completed_events", 0400, root,
&host->completed_events);
}
-#if defined(CONFIG_OF)
static const struct of_device_id atmci_dt_ids[] = {
{ .compatible = "atmel,hsmci" },
{ /* sentinel */ }
@@ -601,63 +625,64 @@ static const struct of_device_id atmci_dt_ids[] = {
MODULE_DEVICE_TABLE(of, atmci_dt_ids);
-static struct mci_platform_data*
-atmci_of_init(struct platform_device *pdev)
+static int atmci_of_init(struct atmel_mci *host)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = host->dev;
+ struct device_node *np = dev->of_node;
struct device_node *cnp;
- struct mci_platform_data *pdata;
u32 slot_id;
+ int err;
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
+ if (!np)
+ return dev_err_probe(dev, -EINVAL, "device node not found\n");
for_each_child_of_node(np, cnp) {
if (of_property_read_u32(cnp, "reg", &slot_id)) {
- dev_warn(&pdev->dev, "reg property is missing for %pOF\n",
- cnp);
+ dev_warn(dev, "reg property is missing for %pOF\n", cnp);
continue;
}
if (slot_id >= ATMCI_MAX_NR_SLOTS) {
- dev_warn(&pdev->dev, "can't have more than %d slots\n",
+ dev_warn(dev, "can't have more than %d slots\n",
ATMCI_MAX_NR_SLOTS);
of_node_put(cnp);
break;
}
if (of_property_read_u32(cnp, "bus-width",
- &pdata->slot[slot_id].bus_width))
- pdata->slot[slot_id].bus_width = 1;
-
- pdata->slot[slot_id].detect_pin =
- of_get_named_gpio(cnp, "cd-gpios", 0);
-
- pdata->slot[slot_id].detect_is_active_high =
- of_property_read_bool(cnp, "cd-inverted");
+ &host->pdata[slot_id].bus_width))
+ host->pdata[slot_id].bus_width = 1;
+
+ host->pdata[slot_id].detect_pin =
+ devm_fwnode_gpiod_get(dev, of_fwnode_handle(cnp),
+ "cd", GPIOD_IN, "cd-gpios");
+ err = PTR_ERR_OR_ZERO(host->pdata[slot_id].detect_pin);
+ if (err) {
+ if (err != -ENOENT) {
+ of_node_put(cnp);
+ return err;
+ }
+ host->pdata[slot_id].detect_pin = NULL;
+ }
- pdata->slot[slot_id].non_removable =
+ host->pdata[slot_id].non_removable =
of_property_read_bool(cnp, "non-removable");
- pdata->slot[slot_id].wp_pin =
- of_get_named_gpio(cnp, "wp-gpios", 0);
+ host->pdata[slot_id].wp_pin =
+ devm_fwnode_gpiod_get(dev, of_fwnode_handle(cnp),
+ "wp", GPIOD_IN, "wp-gpios");
+ err = PTR_ERR_OR_ZERO(host->pdata[slot_id].wp_pin);
+ if (err) {
+ if (err != -ENOENT) {
+ of_node_put(cnp);
+ return err;
+ }
+ host->pdata[slot_id].wp_pin = NULL;
+ }
}
- return pdata;
-}
-#else /* CONFIG_OF */
-static inline struct mci_platform_data*
-atmci_of_init(struct platform_device *dev)
-{
- return ERR_PTR(-EINVAL);
+ return 0;
}
-#endif
static inline unsigned int atmci_get_version(struct atmel_mci *host)
{
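The rewritten atmci_of_init() above shows the descriptor-based replacement for of_get_named_gpio(): devm_fwnode_gpiod_get() is asked for the "cd"/"wp" pins of each child node, -ENOENT is treated as "pin not wired" while every other error (including -EPROBE_DEFER) aborts the probe, and the old cd-inverted/detect_is_active_high handling disappears because the descriptor already carries the active level from the device tree. A minimal sketch of that lookup pattern, assuming a hypothetical helper name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>

/* Fetch an optional card-detect GPIO for one slot node. */
static int example_get_cd_gpio(struct device *dev, struct device_node *np,
			       struct gpio_desc **out)
{
	struct gpio_desc *desc;

	desc = devm_fwnode_gpiod_get(dev, of_fwnode_handle(np),
				     "cd", GPIOD_IN, "cd-gpios");
	if (IS_ERR(desc)) {
		if (PTR_ERR(desc) == -ENOENT) {
			*out = NULL;		/* slot simply has no detect pin */
			return 0;
		}
		return PTR_ERR(desc);		/* -EPROBE_DEFER etc. propagate */
	}

	*out = desc;
	return 0;
}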
@@ -689,11 +714,10 @@ static inline unsigned int atmci_convert_chksize(struct atmel_mci *host,
static void atmci_timeout_timer(struct timer_list *t)
{
- struct atmel_mci *host;
+ struct atmel_mci *host = timer_container_of(host, t, timer);
+ struct device *dev = host->dev;
- host = from_timer(host, t, timer);
-
- dev_dbg(&host->pdev->dev, "software timeout\n");
+ dev_dbg(dev, "software timeout\n");
if (host->mrq->cmd->data) {
host->mrq->cmd->data->error = -ETIMEDOUT;
@@ -712,7 +736,7 @@ static void atmci_timeout_timer(struct timer_list *t)
host->need_reset = 1;
host->state = STATE_END_REQUEST;
smp_wmb();
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
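This hunk, together with the atmci_work_func() conversion further down, replaces the tasklet with a work item queued on system_bh_wq, so the state machine still runs in softirq-like context but uses the workqueue API; from_tasklet() becomes from_work(), and from_timer()/del_timer() are spelled with their newer names timer_container_of()/timer_delete(). A stand-alone sketch of the pattern (struct and handler names are hypothetical, not atmel-mci code):

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct example_host {
	struct work_struct bh_work;
	struct timer_list timer;
	unsigned long pending_events;
};

/* Runs in BH context on system_bh_wq, like the tasklet it replaces. */
static void example_bh_work(struct work_struct *t)
{
	struct example_host *host = from_work(host, t, bh_work);

	/* ...drive the request state machine from host->pending_events... */
}

static void example_timeout(struct timer_list *t)
{
	/* from_timer() under its newer name */
	struct example_host *host = timer_container_of(host, t, timer);

	queue_work(system_bh_wq, &host->bh_work);
}

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct example_host *host = dev_id;

	set_bit(0, &host->pending_events);
	queue_work(system_bh_wq, &host->bh_work);	/* was tasklet_schedule() */
	return IRQ_HANDLED;
}

static void example_host_init(struct example_host *host)
{
	INIT_WORK(&host->bh_work, example_bh_work);	/* was tasklet_setup() */
	timer_setup(&host->timer, example_timeout, 0);
}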
@@ -811,15 +835,14 @@ static u32 atmci_prepare_command(struct mmc_host *mmc,
static void atmci_send_command(struct atmel_mci *host,
struct mmc_command *cmd, u32 cmd_flags)
{
+ struct device *dev = host->dev;
unsigned int timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
ATMCI_CMD_TIMEOUT_MS;
WARN_ON(host->cmd);
host->cmd = cmd;
- dev_vdbg(&host->pdev->dev,
- "start command: ARGR=0x%08x CMDR=0x%08x\n",
- cmd->arg, cmd_flags);
+ dev_vdbg(dev, "start command: ARGR=0x%08x CMDR=0x%08x\n", cmd->arg, cmd_flags);
atmci_writel(host, ATMCI_ARGR, cmd->arg);
atmci_writel(host, ATMCI_CMDR, cmd_flags);
@@ -829,13 +852,15 @@ static void atmci_send_command(struct atmel_mci *host,
static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
{
- dev_dbg(&host->pdev->dev, "send stop command\n");
+ struct device *dev = host->dev;
+
+ dev_dbg(dev, "send stop command\n");
atmci_send_command(host, data->stop, host->stop_cmdr);
atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
}
/*
- * Configure given PDC buffer taking care of alignement issues.
+ * Configure given PDC buffer taking care of alignment issues.
* Update host->data_size and host->sg.
*/
static void atmci_pdc_set_single_buf(struct atmel_mci *host,
@@ -902,11 +927,10 @@ static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
static void atmci_pdc_cleanup(struct atmel_mci *host)
{
struct mmc_data *data = host->data;
+ struct device *dev = host->dev;
if (data)
- dma_unmap_sg(&host->pdev->dev,
- data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ dma_unmap_sg(dev, data->sg, data->sg_len, mmc_get_dma_dir(data));
}
/*
@@ -916,6 +940,7 @@ static void atmci_pdc_cleanup(struct atmel_mci *host)
*/
static void atmci_pdc_complete(struct atmel_mci *host)
{
+ struct device *dev = host->dev;
int transfer_size = host->data->blocks * host->data->blksz;
int i;
@@ -932,9 +957,9 @@ static void atmci_pdc_complete(struct atmel_mci *host)
atmci_pdc_cleanup(host);
- dev_dbg(&host->pdev->dev, "(%s) set pending xfer complete\n", __func__);
+ dev_dbg(dev, "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
static void atmci_dma_cleanup(struct atmel_mci *host)
@@ -948,14 +973,15 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
}
/*
- * This function is called by the DMA driver from tasklet context.
+ * This function is called by the DMA driver from bh context.
*/
static void atmci_dma_complete(void *arg)
{
struct atmel_mci *host = arg;
struct mmc_data *data = host->data;
+ struct device *dev = host->dev;
- dev_vdbg(&host->pdev->dev, "DMA complete\n");
+ dev_vdbg(dev, "DMA complete\n");
if (host->caps.has_dma_conf_reg)
/* Disable DMA hardware handshaking on MCI */
@@ -968,10 +994,9 @@ static void atmci_dma_complete(void *arg)
* to send the stop command or waiting for NBUSY in this case.
*/
if (data) {
- dev_dbg(&host->pdev->dev,
- "(%s) set pending xfer complete\n", __func__);
+ dev_dbg(dev, "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
/*
* Regardless of what the documentation says, we have
@@ -984,7 +1009,7 @@ static void atmci_dma_complete(void *arg)
* haven't seen all the potential error bits yet.
*
* The interrupt handler will schedule a different
- * tasklet to finish things up when the data transfer
+ * bh work to finish things up when the data transfer
* is completely done.
*
* We may not complete the mmc request here anyway
@@ -1043,6 +1068,7 @@ static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
static u32
atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
{
+ struct device *dev = host->dev;
u32 iflags, tmp;
int i;
@@ -1068,8 +1094,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
/* Configure PDC */
host->data_size = data->blocks * data->blksz;
- dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ dma_map_sg(dev, data->sg, data->sg_len, mmc_get_dma_dir(data));
if ((!host->caps.has_rwproof)
&& (host->data->flags & MMC_DATA_WRITE)) {
@@ -1122,13 +1147,12 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
}
/* If we don't have a channel, we can't do DMA */
- chan = host->dma.chan;
- if (chan)
- host->data_chan = chan;
-
- if (!chan)
+ if (!host->dma.chan)
return -ENODEV;
+ chan = host->dma.chan;
+ host->data_chan = chan;
+
if (data->flags & MMC_DATA_READ) {
host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
maxburst = atmci_convert_chksize(host,
@@ -1196,8 +1220,9 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
static void atmci_stop_transfer(struct atmel_mci *host)
{
- dev_dbg(&host->pdev->dev,
- "(%s) set pending xfer complete\n", __func__);
+ struct device *dev = host->dev;
+
+ dev_dbg(dev, "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
@@ -1213,14 +1238,14 @@ static void atmci_stop_transfer_pdc(struct atmel_mci *host)
static void atmci_stop_transfer_dma(struct atmel_mci *host)
{
struct dma_chan *chan = host->data_chan;
+ struct device *dev = host->dev;
if (chan) {
dmaengine_terminate_all(chan);
atmci_dma_cleanup(host);
} else {
/* Data transfer was stopped by the interrupt handler */
- dev_dbg(&host->pdev->dev,
- "(%s) set pending xfer complete\n", __func__);
+ dev_dbg(dev, "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
@@ -1233,6 +1258,7 @@ static void atmci_stop_transfer_dma(struct atmel_mci *host)
static void atmci_start_request(struct atmel_mci *host,
struct atmel_mci_slot *slot)
{
+ struct device *dev = host->dev;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
@@ -1248,7 +1274,7 @@ static void atmci_start_request(struct atmel_mci *host,
host->cmd_status = 0;
host->data_status = 0;
- dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
+ dev_dbg(dev, "start request: cmd %u\n", mrq->cmd->opcode);
if (host->need_reset || host->caps.need_reset_after_xfer) {
iflags = atmci_readl(host, ATMCI_IMR);
@@ -1327,6 +1353,8 @@ static void atmci_start_request(struct atmel_mci *host,
static void atmci_queue_request(struct atmel_mci *host,
struct atmel_mci_slot *slot, struct mmc_request *mrq)
{
+ struct device *dev = host->dev;
+
dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
host->state);
@@ -1336,7 +1364,7 @@ static void atmci_queue_request(struct atmel_mci *host,
host->state = STATE_SENDING_CMD;
atmci_start_request(host, slot);
} else {
- dev_dbg(&host->pdev->dev, "queue request\n");
+ dev_dbg(dev, "queue request\n");
list_add_tail(&slot->queue_node, &host->queue);
}
spin_unlock_bh(&host->lock);
@@ -1346,10 +1374,11 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct atmel_mci_slot *slot = mmc_priv(mmc);
struct atmel_mci *host = slot->host;
+ struct device *dev = host->dev;
struct mmc_data *data;
WARN_ON(slot->mrq);
- dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
+ dev_dbg(dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
/*
* We may "know" the card is gone even though there's still an
@@ -1510,8 +1539,8 @@ static int atmci_get_ro(struct mmc_host *mmc)
int read_only = -ENOSYS;
struct atmel_mci_slot *slot = mmc_priv(mmc);
- if (gpio_is_valid(slot->wp_pin)) {
- read_only = gpio_get_value(slot->wp_pin);
+ if (slot->wp_pin) {
+ read_only = gpiod_get_value(slot->wp_pin);
dev_dbg(&mmc->class_dev, "card is %s\n",
read_only ? "read-only" : "read-write");
}
@@ -1524,9 +1553,8 @@ static int atmci_get_cd(struct mmc_host *mmc)
int present = -ENOSYS;
struct atmel_mci_slot *slot = mmc_priv(mmc);
- if (gpio_is_valid(slot->detect_pin)) {
- present = !(gpio_get_value(slot->detect_pin) ^
- slot->detect_is_active_high);
+ if (slot->detect_pin) {
+ present = gpiod_get_value_cansleep(slot->detect_pin);
dev_dbg(&mmc->class_dev, "card is %spresent\n",
present ? "" : "not ");
}
@@ -1560,10 +1588,11 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
{
struct atmel_mci_slot *slot = NULL;
struct mmc_host *prev_mmc = host->cur_slot->mmc;
+ struct device *dev = host->dev;
WARN_ON(host->cmd || host->data);
- del_timer(&host->timer);
+ timer_delete(&host->timer);
/*
* Update the MMC clock rate if necessary. This may be
@@ -1582,12 +1611,11 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
slot = list_entry(host->queue.next,
struct atmel_mci_slot, queue_node);
list_del(&slot->queue_node);
- dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
- mmc_hostname(slot->mmc));
+ dev_vdbg(dev, "list not empty: %s is next\n", mmc_hostname(slot->mmc));
host->state = STATE_SENDING_CMD;
atmci_start_request(host, slot);
} else {
- dev_vdbg(&host->pdev->dev, "list empty\n");
+ dev_vdbg(dev, "list empty\n");
host->state = STATE_IDLE;
}
@@ -1624,7 +1652,8 @@ static void atmci_command_complete(struct atmel_mci *host,
static void atmci_detect_change(struct timer_list *t)
{
- struct atmel_mci_slot *slot = from_timer(slot, t, detect_timer);
+ struct atmel_mci_slot *slot = timer_container_of(slot, t,
+ detect_timer);
bool present;
bool present_old;
@@ -1638,9 +1667,8 @@ static void atmci_detect_change(struct timer_list *t)
if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
return;
- enable_irq(gpio_to_irq(slot->detect_pin));
- present = !(gpio_get_value(slot->detect_pin) ^
- slot->detect_is_active_high);
+ enable_irq(gpiod_to_irq(slot->detect_pin));
+ present = gpiod_get_value_cansleep(slot->detect_pin);
present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
@@ -1719,11 +1747,12 @@ static void atmci_detect_change(struct timer_list *t)
}
}
-static void atmci_tasklet_func(struct tasklet_struct *t)
+static void atmci_work_func(struct work_struct *t)
{
- struct atmel_mci *host = from_tasklet(host, t, tasklet);
+ struct atmel_mci *host = from_work(host, t, bh_work);
struct mmc_request *mrq = host->mrq;
struct mmc_data *data = host->data;
+ struct device *dev = host->dev;
enum atmel_mci_state state = host->state;
enum atmel_mci_state prev_state;
u32 status;
@@ -1732,14 +1761,13 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
state = host->state;
- dev_vdbg(&host->pdev->dev,
- "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
+ dev_vdbg(dev, "bh_work: state %u pending/completed/mask %lx/%lx/%x\n",
state, host->pending_events, host->completed_events,
atmci_readl(host, ATMCI_IMR));
do {
prev_state = state;
- dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
+ dev_dbg(dev, "FSM: state=%d\n", state);
switch (state) {
case STATE_IDLE:
@@ -1752,18 +1780,17 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
* END_REQUEST by default, WAITING_NOTBUSY if it's a
* command needing it or DATA_XFER if there is data.
*/
- dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
+ dev_dbg(dev, "FSM: cmd ready?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_CMD_RDY))
break;
- dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
+ dev_dbg(dev, "set completed cmd ready\n");
host->cmd = NULL;
atmci_set_completed(host, EVENT_CMD_RDY);
atmci_command_complete(host, mrq->cmd);
if (mrq->data) {
- dev_dbg(&host->pdev->dev,
- "command with data transfer");
+ dev_dbg(dev, "command with data transfer\n");
/*
* If there is a command error don't start
* data transfer.
@@ -1778,8 +1805,7 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
} else
state = STATE_DATA_XFER;
} else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
- dev_dbg(&host->pdev->dev,
- "command response need waiting notbusy");
+ dev_dbg(dev, "command response need waiting notbusy\n");
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
state = STATE_WAITING_NOTBUSY;
} else
@@ -1790,7 +1816,7 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
case STATE_DATA_XFER:
if (atmci_test_and_clear_pending(host,
EVENT_DATA_ERROR)) {
- dev_dbg(&host->pdev->dev, "set completed data error\n");
+ dev_dbg(dev, "set completed data error\n");
atmci_set_completed(host, EVENT_DATA_ERROR);
state = STATE_END_REQUEST;
break;
@@ -1803,14 +1829,12 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
* to the next step which is WAITING_NOTBUSY in write
* case and directly SENDING_STOP in read case.
*/
- dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
+ dev_dbg(dev, "FSM: xfer complete?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_XFER_COMPLETE))
break;
- dev_dbg(&host->pdev->dev,
- "(%s) set completed xfer complete\n",
- __func__);
+ dev_dbg(dev, "(%s) set completed xfer complete\n", __func__);
atmci_set_completed(host, EVENT_XFER_COMPLETE);
if (host->caps.need_notbusy_for_read_ops ||
@@ -1818,7 +1842,6 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
state = STATE_WAITING_NOTBUSY;
} else if (host->mrq->stop) {
- atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
atmci_send_stop_cmd(host, data);
state = STATE_SENDING_STOP;
} else {
@@ -1836,12 +1859,12 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
* included) or a write operation. In the latter case,
* we need to send a stop command.
*/
- dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
+ dev_dbg(dev, "FSM: not busy?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_NOTBUSY))
break;
- dev_dbg(&host->pdev->dev, "set completed not busy\n");
+ dev_dbg(dev, "set completed not busy\n");
atmci_set_completed(host, EVENT_NOTBUSY);
if (host->data) {
@@ -1851,8 +1874,6 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
* command to send.
*/
if (host->mrq->stop) {
- atmci_writel(host, ATMCI_IER,
- ATMCI_CMDRDY);
atmci_send_stop_cmd(host, data);
state = STATE_SENDING_STOP;
} else {
@@ -1873,12 +1894,12 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
* in order to go to the end request state instead of
* sending stop again.
*/
- dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
+ dev_dbg(dev, "FSM: cmd ready?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_CMD_RDY))
break;
- dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
+ dev_dbg(dev, "FSM: cmd ready\n");
host->cmd = NULL;
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
@@ -2077,6 +2098,7 @@ static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
static irqreturn_t atmci_interrupt(int irq, void *dev_id)
{
struct atmel_mci *host = dev_id;
+ struct device *dev = host->dev;
u32 status, mask, pending;
unsigned int pass_count = 0;
@@ -2088,21 +2110,21 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
break;
if (pending & ATMCI_DATA_ERROR_FLAGS) {
- dev_dbg(&host->pdev->dev, "IRQ: data error\n");
+ dev_dbg(dev, "IRQ: data error\n");
atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
| ATMCI_RXRDY | ATMCI_TXRDY
| ATMCI_ENDRX | ATMCI_ENDTX
| ATMCI_RXBUFF | ATMCI_TXBUFE);
host->data_status = status;
- dev_dbg(&host->pdev->dev, "set pending data error\n");
+ dev_dbg(dev, "set pending data error\n");
smp_wmb();
atmci_set_pending(host, EVENT_DATA_ERROR);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
if (pending & ATMCI_TXBUFE) {
- dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
+ dev_dbg(dev, "IRQ: tx buffer empty\n");
atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
/*
@@ -2118,7 +2140,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
atmci_pdc_complete(host);
}
} else if (pending & ATMCI_ENDTX) {
- dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
+ dev_dbg(dev, "IRQ: end of tx buffer\n");
atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
if (host->data_size) {
@@ -2129,7 +2151,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
}
if (pending & ATMCI_RXBUFF) {
- dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
+ dev_dbg(dev, "IRQ: rx buffer full\n");
atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
/*
@@ -2145,7 +2167,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
atmci_pdc_complete(host);
}
} else if (pending & ATMCI_ENDRX) {
- dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
+ dev_dbg(dev, "IRQ: end of rx buffer\n");
atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
if (host->data_size) {
@@ -2162,21 +2184,21 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
* The appropriate workaround is to use the BLKE signal.
*/
if (pending & ATMCI_BLKE) {
- dev_dbg(&host->pdev->dev, "IRQ: blke\n");
+ dev_dbg(dev, "IRQ: blke\n");
atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
smp_wmb();
- dev_dbg(&host->pdev->dev, "set pending notbusy\n");
+ dev_dbg(dev, "set pending notbusy\n");
atmci_set_pending(host, EVENT_NOTBUSY);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
if (pending & ATMCI_NOTBUSY) {
- dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
+ dev_dbg(dev, "IRQ: not_busy\n");
atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
smp_wmb();
- dev_dbg(&host->pdev->dev, "set pending notbusy\n");
+ dev_dbg(dev, "set pending notbusy\n");
atmci_set_pending(host, EVENT_NOTBUSY);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
if (pending & ATMCI_RXRDY)
@@ -2185,13 +2207,13 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
atmci_write_data_pio(host);
if (pending & ATMCI_CMDRDY) {
- dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
+ dev_dbg(dev, "IRQ: cmd ready\n");
atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
host->cmd_status = status;
smp_wmb();
- dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
+ dev_dbg(dev, "set pending cmd rdy\n");
atmci_set_pending(host, EVENT_CMD_RDY);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
@@ -2221,10 +2243,12 @@ static int atmci_init_slot(struct atmel_mci *host,
struct mci_slot_pdata *slot_data, unsigned int id,
u32 sdc_reg, u32 sdio_irq)
{
+ struct device *dev = host->dev;
struct mmc_host *mmc;
struct atmel_mci_slot *slot;
+ int ret;
- mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
+ mmc = devm_mmc_alloc_host(dev, sizeof(*slot));
if (!mmc)
return -ENOMEM;
@@ -2233,16 +2257,15 @@ static int atmci_init_slot(struct atmel_mci *host,
slot->host = host;
slot->detect_pin = slot_data->detect_pin;
slot->wp_pin = slot_data->wp_pin;
- slot->detect_is_active_high = slot_data->detect_is_active_high;
slot->sdc_reg = sdc_reg;
slot->sdio_irq = sdio_irq;
dev_dbg(&mmc->class_dev,
"slot[%u]: bus_width=%u, detect_pin=%d, "
"detect_is_active_high=%s, wp_pin=%d\n",
- id, slot_data->bus_width, slot_data->detect_pin,
- slot_data->detect_is_active_high ? "true" : "false",
- slot_data->wp_pin);
+ id, slot_data->bus_width, desc_to_gpio(slot_data->detect_pin),
+ str_true_false(!gpiod_is_active_low(slot_data->detect_pin)),
+ desc_to_gpio(slot_data->wp_pin));
mmc->ops = &atmci_ops;
mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
@@ -2278,50 +2301,41 @@ static int atmci_init_slot(struct atmel_mci *host,
/* Assume card is present initially */
set_bit(ATMCI_CARD_PRESENT, &slot->flags);
- if (gpio_is_valid(slot->detect_pin)) {
- if (devm_gpio_request(&host->pdev->dev, slot->detect_pin,
- "mmc_detect")) {
- dev_dbg(&mmc->class_dev, "no detect pin available\n");
- slot->detect_pin = -EBUSY;
- } else if (gpio_get_value(slot->detect_pin) ^
- slot->detect_is_active_high) {
+ if (slot->detect_pin) {
+ if (!gpiod_get_value_cansleep(slot->detect_pin))
clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
- }
+ } else {
+ dev_dbg(&mmc->class_dev, "no detect pin available\n");
}
- if (!gpio_is_valid(slot->detect_pin)) {
+ if (!slot->detect_pin) {
if (slot_data->non_removable)
mmc->caps |= MMC_CAP_NONREMOVABLE;
else
mmc->caps |= MMC_CAP_NEEDS_POLL;
}
- if (gpio_is_valid(slot->wp_pin)) {
- if (devm_gpio_request(&host->pdev->dev, slot->wp_pin,
- "mmc_wp")) {
- dev_dbg(&mmc->class_dev, "no WP pin available\n");
- slot->wp_pin = -EBUSY;
- }
- }
+ if (!slot->wp_pin)
+ dev_dbg(&mmc->class_dev, "no WP pin available\n");
host->slot[id] = slot;
mmc_regulator_get_supply(mmc);
- mmc_add_host(mmc);
-
- if (gpio_is_valid(slot->detect_pin)) {
- int ret;
+ ret = mmc_add_host(mmc);
+ if (ret)
+ return ret;
+ if (slot->detect_pin) {
timer_setup(&slot->detect_timer, atmci_detect_change, 0);
- ret = request_irq(gpio_to_irq(slot->detect_pin),
- atmci_detect_interrupt,
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- "mmc-detect", slot);
+ ret = request_irq(gpiod_to_irq(slot->detect_pin),
+ atmci_detect_interrupt,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "mmc-detect", slot);
if (ret) {
dev_dbg(&mmc->class_dev,
"could not request IRQ %d for detect pin\n",
- gpio_to_irq(slot->detect_pin));
- slot->detect_pin = -EBUSY;
+ gpiod_to_irq(slot->detect_pin));
+ slot->detect_pin = NULL;
}
}
@@ -2340,42 +2354,23 @@ static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
mmc_remove_host(slot->mmc);
- if (gpio_is_valid(slot->detect_pin)) {
- int pin = slot->detect_pin;
-
- free_irq(gpio_to_irq(pin), slot);
- del_timer_sync(&slot->detect_timer);
+ if (slot->detect_pin) {
+ free_irq(gpiod_to_irq(slot->detect_pin), slot);
+ timer_delete_sync(&slot->detect_timer);
}
slot->host->slot[id] = NULL;
- mmc_free_host(slot->mmc);
}
static int atmci_configure_dma(struct atmel_mci *host)
{
- host->dma.chan = dma_request_chan(&host->pdev->dev, "rxtx");
-
- if (PTR_ERR(host->dma.chan) == -ENODEV) {
- struct mci_platform_data *pdata = host->pdev->dev.platform_data;
- dma_cap_mask_t mask;
-
- if (!pdata || !pdata->dma_filter)
- return -ENODEV;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- host->dma.chan = dma_request_channel(mask, pdata->dma_filter,
- pdata->dma_slave);
- if (!host->dma.chan)
- host->dma.chan = ERR_PTR(-ENODEV);
- }
+ struct device *dev = host->dev;
+ host->dma.chan = dma_request_chan(dev, "rxtx");
if (IS_ERR(host->dma.chan))
return PTR_ERR(host->dma.chan);
- dev_info(&host->pdev->dev, "using %s for DMA transfers\n",
- dma_chan_name(host->dma.chan));
+ dev_info(dev, "using %s for DMA transfers\n", dma_chan_name(host->dma.chan));
host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -2395,11 +2390,11 @@ static int atmci_configure_dma(struct atmel_mci *host)
*/
static void atmci_get_cap(struct atmel_mci *host)
{
+ struct device *dev = host->dev;
unsigned int version;
version = atmci_get_version(host);
- dev_info(&host->pdev->dev,
- "version: 0x%x\n", version);
+ dev_info(dev, "version: 0x%x\n", version);
host->caps.has_dma_conf_reg = false;
host->caps.has_pdc = true;
@@ -2440,15 +2435,14 @@ static void atmci_get_cap(struct atmel_mci *host)
break;
default:
host->caps.has_pdc = false;
- dev_warn(&host->pdev->dev,
- "Unmanaged mci version, set minimum capabilities\n");
+ dev_warn(dev, "Unmanaged mci version, set minimum capabilities\n");
break;
}
}
static int atmci_probe(struct platform_device *pdev)
{
- struct mci_platform_data *pdata;
+ struct device *dev = &pdev->dev;
struct atmel_mci *host;
struct resource *regs;
unsigned int nr_slots;
@@ -2458,32 +2452,28 @@ static int atmci_probe(struct platform_device *pdev)
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
return -ENXIO;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmci_of_init(pdev);
- if (IS_ERR(pdata)) {
- dev_err(&pdev->dev, "platform data not available\n");
- return PTR_ERR(pdata);
- }
- }
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
- host->pdev = pdev;
+ host->dev = dev;
spin_lock_init(&host->lock);
INIT_LIST_HEAD(&host->queue);
- host->mck = devm_clk_get(&pdev->dev, "mci_clk");
+ ret = atmci_of_init(host);
+ if (ret)
+ return dev_err_probe(dev, ret, "Slot information not available\n");
+
+ host->mck = devm_clk_get(dev, "mci_clk");
if (IS_ERR(host->mck))
return PTR_ERR(host->mck);
- host->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+ host->regs = devm_ioremap(dev, regs->start, resource_size(regs));
if (!host->regs)
return -ENOMEM;
@@ -2496,9 +2486,9 @@ static int atmci_probe(struct platform_device *pdev)
host->mapbase = regs->start;
- tasklet_setup(&host->tasklet, atmci_tasklet_func);
+ INIT_WORK(&host->bh_work, atmci_work_func);
- ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
+ ret = request_irq(irq, atmci_interrupt, 0, dev_name(dev), host);
if (ret) {
clk_disable_unprepare(host->mck);
return ret;
@@ -2507,19 +2497,21 @@ static int atmci_probe(struct platform_device *pdev)
/* Get MCI capabilities and set operations accordingly */
atmci_get_cap(host);
ret = atmci_configure_dma(host);
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ clk_disable_unprepare(host->mck);
goto err_dma_probe_defer;
+ }
if (ret == 0) {
host->prepare_data = &atmci_prepare_data_dma;
host->submit_data = &atmci_submit_data_dma;
host->stop_transfer = &atmci_stop_transfer_dma;
} else if (host->caps.has_pdc) {
- dev_info(&pdev->dev, "using PDC\n");
+ dev_info(dev, "using PDC\n");
host->prepare_data = &atmci_prepare_data_pdc;
host->submit_data = &atmci_submit_data_pdc;
host->stop_transfer = &atmci_stop_transfer_pdc;
} else {
- dev_info(&pdev->dev, "using PIO\n");
+ dev_info(dev, "using PIO\n");
host->prepare_data = &atmci_prepare_data;
host->submit_data = &atmci_submit_data;
host->stop_transfer = &atmci_stop_transfer;
@@ -2529,25 +2521,25 @@ static int atmci_probe(struct platform_device *pdev)
timer_setup(&host->timer, atmci_timeout_timer, 0);
- pm_runtime_get_noresume(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
/* We need at least one slot to succeed */
nr_slots = 0;
ret = -ENODEV;
- if (pdata->slot[0].bus_width) {
- ret = atmci_init_slot(host, &pdata->slot[0],
+ if (host->pdata[0].bus_width) {
+ ret = atmci_init_slot(host, &host->pdata[0],
0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
if (!ret) {
nr_slots++;
host->buf_size = host->slot[0]->mmc->max_req_size;
}
}
- if (pdata->slot[1].bus_width) {
- ret = atmci_init_slot(host, &pdata->slot[1],
+ if (host->pdata[1].bus_width) {
+ ret = atmci_init_slot(host, &host->pdata[1],
1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
if (!ret) {
nr_slots++;
@@ -2558,27 +2550,24 @@ static int atmci_probe(struct platform_device *pdev)
}
if (!nr_slots) {
- dev_err(&pdev->dev, "init failed: no slot defined\n");
+ dev_err_probe(dev, ret, "init failed: no slot defined\n");
goto err_init_slot;
}
if (!host->caps.has_rwproof) {
- host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
+ host->buffer = dma_alloc_coherent(dev, host->buf_size,
&host->buf_phys_addr,
GFP_KERNEL);
if (!host->buffer) {
- ret = -ENOMEM;
- dev_err(&pdev->dev, "buffer allocation failed\n");
+ ret = dev_err_probe(dev, -ENOMEM, "buffer allocation failed\n");
goto err_dma_alloc;
}
}
- dev_info(&pdev->dev,
- "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
- host->mapbase, irq, nr_slots);
+ dev_info(dev, "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
+ host->mapbase, irq, nr_slots);
- pm_runtime_mark_last_busy(&host->pdev->dev);
- pm_runtime_put_autosuspend(&pdev->dev);
+ pm_runtime_put_autosuspend(dev);
return 0;
@@ -2590,10 +2579,10 @@ err_dma_alloc:
err_init_slot:
clk_disable_unprepare(host->mck);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
- del_timer_sync(&host->timer);
+ timer_delete_sync(&host->timer);
if (!IS_ERR(host->dma.chan))
dma_release_channel(host->dma.chan);
err_dma_probe_defer:
@@ -2601,16 +2590,16 @@ err_dma_probe_defer:
return ret;
}
-static int atmci_remove(struct platform_device *pdev)
+static void atmci_remove(struct platform_device *pdev)
{
struct atmel_mci *host = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
unsigned int i;
- pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_get_sync(dev);
if (host->buffer)
- dma_free_coherent(&pdev->dev, host->buf_size,
- host->buffer, host->buf_phys_addr);
+ dma_free_coherent(dev, host->buf_size, host->buffer, host->buf_phys_addr);
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (host->slot[i])
@@ -2621,7 +2610,7 @@ static int atmci_remove(struct platform_device *pdev)
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
atmci_readl(host, ATMCI_SR);
- del_timer_sync(&host->timer);
+ timer_delete_sync(&host->timer);
if (!IS_ERR(host->dma.chan))
dma_release_channel(host->dma.chan);
@@ -2629,13 +2618,10 @@ static int atmci_remove(struct platform_device *pdev)
clk_disable_unprepare(host->mck);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_put_noidle(&pdev->dev);
-
- return 0;
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
}
-#ifdef CONFIG_PM
static int atmci_runtime_suspend(struct device *dev)
{
struct atmel_mci *host = dev_get_drvdata(dev);
@@ -2655,12 +2641,10 @@ static int atmci_runtime_resume(struct device *dev)
return clk_prepare_enable(host->mck);
}
-#endif
static const struct dev_pm_ops atmci_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(atmci_runtime_suspend, atmci_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(atmci_runtime_suspend, atmci_runtime_resume, NULL)
};
static struct platform_driver atmci_driver = {
@@ -2669,8 +2653,8 @@ static struct platform_driver atmci_driver = {
.driver = {
.name = "atmel_mci",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .of_match_table = of_match_ptr(atmci_dt_ids),
- .pm = &atmci_dev_pm_ops,
+ .of_match_table = atmci_dt_ids,
+ .pm = pm_ptr(&atmci_dev_pm_ops),
},
};
module_platform_driver(atmci_driver);
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 0acc237843f7..cc6e05f9b96f 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -42,6 +42,7 @@
#include <linux/leds.h>
#include <linux/mmc/host.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include <asm/io.h>
#include <asm/mach-au1x00/au1000.h>
@@ -113,8 +114,8 @@ struct au1xmmc_host {
int irq;
- struct tasklet_struct finish_task;
- struct tasklet_struct data_task;
+ struct work_struct finish_bh_work;
+ struct work_struct data_bh_work;
struct au1xmmc_platform_data *platdata;
struct platform_device *pdev;
struct resource *ioarea;
@@ -253,9 +254,9 @@ static void au1xmmc_finish_request(struct au1xmmc_host *host)
mmc_request_done(host->mmc, mrq);
}
-static void au1xmmc_tasklet_finish(struct tasklet_struct *t)
+static void au1xmmc_finish_bh_work(struct work_struct *t)
{
- struct au1xmmc_host *host = from_tasklet(host, t, finish_task);
+ struct au1xmmc_host *host = from_work(host, t, finish_bh_work);
au1xmmc_finish_request(host);
}
@@ -363,9 +364,9 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
au1xmmc_finish_request(host);
}
-static void au1xmmc_tasklet_data(struct tasklet_struct *t)
+static void au1xmmc_data_bh_work(struct work_struct *t)
{
- struct au1xmmc_host *host = from_tasklet(host, t, data_task);
+ struct au1xmmc_host *host = from_work(host, t, data_bh_work);
u32 status = __raw_readl(HOST_STATUS(host));
au1xmmc_data_complete(host, status);
@@ -388,7 +389,7 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host)
/* This is the pointer to the data buffer */
sg = &data->sg[host->pio.index];
- sg_ptr = kmap_atomic(sg_page(sg)) + sg->offset + host->pio.offset;
+ sg_ptr = kmap_local_page(sg_page(sg)) + sg->offset + host->pio.offset;
/* This is the space left inside the buffer */
sg_len = data->sg[host->pio.index].length - host->pio.offset;
@@ -409,7 +410,7 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host)
__raw_writel((unsigned long)val, HOST_TXPORT(host));
wmb(); /* drain writebuffer */
}
- kunmap_atomic(sg_ptr);
+ kunmap_local(sg_ptr);
host->pio.len -= count;
host->pio.offset += count;
@@ -425,7 +426,7 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host)
if (host->flags & HOST_F_STOP)
SEND_STOP(host);
- tasklet_schedule(&host->data_task);
+ queue_work(system_bh_wq, &host->data_bh_work);
}
}
@@ -446,7 +447,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
if (host->pio.index < host->dma.len) {
sg = &data->sg[host->pio.index];
- sg_ptr = kmap_atomic(sg_page(sg)) + sg->offset + host->pio.offset;
+ sg_ptr = kmap_local_page(sg_page(sg)) + sg->offset + host->pio.offset;
/* This is the space left inside the buffer */
sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;
@@ -488,7 +489,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
sg_ptr[count] = (unsigned char)(val & 0xFF);
}
if (sg_ptr)
- kunmap_atomic(sg_ptr);
+ kunmap_local(sg_ptr);
host->pio.len -= count;
host->pio.offset += count;
@@ -505,7 +506,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
if (host->flags & HOST_F_STOP)
SEND_STOP(host);
- tasklet_schedule(&host->data_task);
+ queue_work(system_bh_wq, &host->data_bh_work);
}
}
@@ -542,7 +543,7 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
}
} else {
- /* Techincally, we should be getting all 48 bits of
+ /* Technically, we should be getting all 48 bits of
* the response (SD_RESP1 + SD_RESP2), but because
* our response omits the CRC, our data ends up
* being shifted 8 bits to the right. In this case,
@@ -561,7 +562,7 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
if (!trans || cmd->error) {
IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
- tasklet_schedule(&host->finish_task);
+ queue_work(system_bh_wq, &host->finish_bh_work);
return;
}
@@ -797,7 +798,7 @@ static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);
/* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */
- tasklet_schedule(&host->finish_task);
+ queue_work(system_bh_wq, &host->finish_bh_work);
}
#if 0
else if (status & SD_STATUS_DD) {
@@ -806,7 +807,7 @@ static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
au1xmmc_receive_pio(host);
else {
au1xmmc_data_complete(host, status);
- /* tasklet_schedule(&host->data_task); */
+ /* queue_work(system_bh_wq, &host->data_bh_work); */
}
}
#endif
@@ -854,7 +855,7 @@ static void au1xmmc_dbdma_callback(int irq, void *dev_id)
if (host->flags & HOST_F_STOP)
SEND_STOP(host);
- tasklet_schedule(&host->data_task);
+ queue_work(system_bh_wq, &host->data_bh_work);
}
static int au1xmmc_dbdma_init(struct au1xmmc_host *host)
@@ -936,11 +937,10 @@ static int au1xmmc_probe(struct platform_device *pdev)
struct resource *r;
int ret, iflag;
- mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host));
if (!mmc) {
dev_err(&pdev->dev, "no memory for mmc_host\n");
- ret = -ENOMEM;
- goto out0;
+ return -ENOMEM;
}
host = mmc_priv(mmc);
@@ -952,14 +952,14 @@ static int au1xmmc_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "no mmio defined\n");
- goto out1;
+ return ret;
}
host->ioarea = request_mem_region(r->start, resource_size(r),
pdev->name);
if (!host->ioarea) {
dev_err(&pdev->dev, "mmio already in use\n");
- goto out1;
+ return ret;
}
host->iobase = ioremap(r->start, 0x3c);
@@ -969,8 +969,10 @@ static int au1xmmc_probe(struct platform_device *pdev)
}
host->irq = platform_get_irq(pdev, 0);
- if (host->irq < 0)
+ if (host->irq < 0) {
+ ret = host->irq;
goto out3;
+ }
mmc->ops = &au1xmmc_ops;
@@ -1037,9 +1039,9 @@ static int au1xmmc_probe(struct platform_device *pdev)
if (host->platdata)
mmc->caps &= ~(host->platdata->mask_host_caps);
- tasklet_setup(&host->data_task, au1xmmc_tasklet_data);
+ INIT_WORK(&host->data_bh_work, au1xmmc_data_bh_work);
- tasklet_setup(&host->finish_task, au1xmmc_tasklet_finish);
+ INIT_WORK(&host->finish_bh_work, au1xmmc_finish_bh_work);
if (has_dbdma()) {
ret = au1xmmc_dbdma_init(host);
@@ -1089,14 +1091,15 @@ out5:
if (host->flags & HOST_F_DBDMA)
au1xmmc_dbdma_shutdown(host);
- tasklet_kill(&host->data_task);
- tasklet_kill(&host->finish_task);
+ cancel_work_sync(&host->data_bh_work);
+ cancel_work_sync(&host->finish_bh_work);
if (host->platdata && host->platdata->cd_setup &&
!(mmc->caps & MMC_CAP_NEEDS_POLL))
host->platdata->cd_setup(mmc, 0);
-out_clk:
+
clk_disable_unprepare(host->clk);
+out_clk:
clk_put(host->clk);
out_irq:
free_irq(host->irq, host);
@@ -1105,13 +1108,10 @@ out3:
out2:
release_resource(host->ioarea);
kfree(host->ioarea);
-out1:
- mmc_free_host(mmc);
-out0:
return ret;
}
-static int au1xmmc_remove(struct platform_device *pdev)
+static void au1xmmc_remove(struct platform_device *pdev)
{
struct au1xmmc_host *host = platform_get_drvdata(pdev);
@@ -1132,8 +1132,8 @@ static int au1xmmc_remove(struct platform_device *pdev)
__raw_writel(0, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
- tasklet_kill(&host->data_task);
- tasklet_kill(&host->finish_task);
+ cancel_work_sync(&host->data_bh_work);
+ cancel_work_sync(&host->finish_bh_work);
if (host->flags & HOST_F_DBDMA)
au1xmmc_dbdma_shutdown(host);
@@ -1147,16 +1147,12 @@ static int au1xmmc_remove(struct platform_device *pdev)
iounmap((void *)host->iobase);
release_resource(host->ioarea);
kfree(host->ioarea);
-
- mmc_free_host(host->mmc);
}
- return 0;
}
-#ifdef CONFIG_PM
-static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
+static int au1xmmc_suspend(struct device *dev)
{
- struct au1xmmc_host *host = platform_get_drvdata(pdev);
+ struct au1xmmc_host *host = dev_get_drvdata(dev);
__raw_writel(0, HOST_CONFIG2(host));
__raw_writel(0, HOST_CONFIG(host));
@@ -1167,27 +1163,24 @@ static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int au1xmmc_resume(struct platform_device *pdev)
+static int au1xmmc_resume(struct device *dev)
{
- struct au1xmmc_host *host = platform_get_drvdata(pdev);
+ struct au1xmmc_host *host = dev_get_drvdata(dev);
au1xmmc_reset_controller(host);
return 0;
}
-#else
-#define au1xmmc_suspend NULL
-#define au1xmmc_resume NULL
-#endif
+
+static DEFINE_SIMPLE_DEV_PM_OPS(au1xmmc_pmops, au1xmmc_suspend, au1xmmc_resume);
static struct platform_driver au1xmmc_driver = {
.probe = au1xmmc_probe,
.remove = au1xmmc_remove,
- .suspend = au1xmmc_suspend,
- .resume = au1xmmc_resume,
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .pm = pm_sleep_ptr(&au1xmmc_pmops),
},
};
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 8c2361e66277..ee63835b3ca0 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -44,6 +44,7 @@
#include <linux/scatterlist.h>
#include <linux/time.h>
#include <linux/workqueue.h>
+#include <linux/string_choices.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
@@ -148,9 +149,10 @@ struct bcm2835_host {
void __iomem *ioaddr;
u32 phys_addr;
+ struct clk *clk;
struct platform_device *pdev;
- int clock; /* Current clock speed */
+ unsigned int clock; /* Current clock speed */
unsigned int max_clk; /* Max possible freq */
struct work_struct dma_work;
struct delayed_work timeout_work; /* Timer for timeouts */
@@ -327,7 +329,6 @@ static void bcm2835_dma_complete(void *param)
static void bcm2835_transfer_block_pio(struct bcm2835_host *host, bool is_read)
{
- unsigned long flags;
size_t blksize;
unsigned long wait_max;
@@ -335,8 +336,6 @@ static void bcm2835_transfer_block_pio(struct bcm2835_host *host, bool is_read)
wait_max = jiffies + msecs_to_jiffies(500);
- local_irq_save(flags);
-
while (blksize) {
int copy_words;
u32 hsts = 0;
@@ -393,8 +392,7 @@ static void bcm2835_transfer_block_pio(struct bcm2835_host *host, bool is_read)
if (time_after(jiffies, wait_max)) {
dev_err(dev, "PIO %s timeout - EDM %08x\n",
- is_read ? "read" : "write",
- edm);
+ str_read_write(is_read), edm);
hsts = SDHSTS_REW_TIME_OUT;
break;
}
@@ -421,8 +419,6 @@ static void bcm2835_transfer_block_pio(struct bcm2835_host *host, bool is_read)
}
sg_miter_stop(&host->sg_miter);
-
- local_irq_restore(flags);
}
static void bcm2835_transfer_pio(struct bcm2835_host *host)
@@ -439,12 +435,12 @@ static void bcm2835_transfer_pio(struct bcm2835_host *host)
SDHSTS_CRC7_ERROR |
SDHSTS_FIFO_ERROR)) {
dev_err(dev, "%s transfer error - HSTS %08x\n",
- is_read ? "read" : "write", sdhsts);
+ str_read_write(is_read), sdhsts);
host->data->error = -EILSEQ;
} else if ((sdhsts & (SDHSTS_CMD_TIME_OUT |
SDHSTS_REW_TIME_OUT))) {
dev_err(dev, "%s timeout error - HSTS %08x\n",
- is_read ? "read" : "write", sdhsts);
+ str_read_write(is_read), sdhsts);
host->data->error = -ETIMEDOUT;
}
}
@@ -507,7 +503,8 @@ void bcm2835_prepare_dma(struct bcm2835_host *host, struct mmc_data *data)
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
- dma_unmap_sg(dma_chan->device->dev, data->sg, sg_len, dir_data);
+ dma_unmap_sg(dma_chan->device->dev, data->sg, data->sg_len,
+ dir_data);
return;
}
@@ -1068,7 +1065,6 @@ static void bcm2835_dma_complete_work(struct work_struct *work)
}
if (host->drain_words) {
- unsigned long flags;
void *page;
u32 *buf;
@@ -1076,8 +1072,7 @@ static void bcm2835_dma_complete_work(struct work_struct *work)
host->drain_page += host->drain_offset >> PAGE_SHIFT;
host->drain_offset &= ~PAGE_MASK;
}
- local_irq_save(flags);
- page = kmap_atomic(host->drain_page);
+ page = kmap_local_page(host->drain_page);
buf = page + host->drain_offset;
while (host->drain_words) {
@@ -1088,8 +1083,7 @@ static void bcm2835_dma_complete_work(struct work_struct *work)
host->drain_words--;
}
- kunmap_atomic(page);
- local_irq_restore(flags);
+ kunmap_local(page);
}
bcm2835_finish_data(host);
@@ -1259,7 +1253,7 @@ static void bcm2835_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static const struct mmc_host_ops bcm2835_ops = {
.request = bcm2835_request,
.set_ios = bcm2835_set_ios,
- .hw_reset = bcm2835_reset,
+ .card_hw_reset = bcm2835_reset,
};
static int bcm2835_add_host(struct bcm2835_host *host)
@@ -1293,14 +1287,12 @@ static int bcm2835_add_host(struct bcm2835_host *host)
host->dma_cfg_tx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
host->dma_cfg_tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- host->dma_cfg_tx.slave_id = 13; /* DREQ channel */
host->dma_cfg_tx.direction = DMA_MEM_TO_DEV;
host->dma_cfg_tx.src_addr = 0;
host->dma_cfg_tx.dst_addr = host->phys_addr + SDDATA;
host->dma_cfg_rx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
host->dma_cfg_rx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- host->dma_cfg_rx.slave_id = 13; /* DREQ channel */
host->dma_cfg_rx.direction = DMA_DEV_TO_MEM;
host->dma_cfg_rx.src_addr = host->phys_addr + SDDATA;
host->dma_cfg_rx.dst_addr = 0;
@@ -1352,17 +1344,35 @@ static int bcm2835_add_host(struct bcm2835_host *host)
return 0;
}
+static int bcm2835_suspend(struct device *dev)
+{
+ struct bcm2835_host *host = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(host->clk);
+
+ return 0;
+}
+
+static int bcm2835_resume(struct device *dev)
+{
+ struct bcm2835_host *host = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(host->clk);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(bcm2835_pm_ops, bcm2835_suspend,
+ bcm2835_resume);
+
static int bcm2835_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct clk *clk;
struct bcm2835_host *host;
struct mmc_host *mmc;
const __be32 *regaddr_p;
int ret;
dev_dbg(dev, "%s\n", __func__);
- mmc = mmc_alloc_host(sizeof(*host), dev);
+ mmc = devm_mmc_alloc_host(dev, sizeof(*host));
if (!mmc)
return -ENOMEM;
@@ -1403,28 +1413,31 @@ static int bcm2835_probe(struct platform_device *pdev)
/* Ignore errors to fall back to PIO mode */
}
-
- clk = devm_clk_get(dev, NULL);
- if (IS_ERR(clk)) {
- ret = dev_err_probe(dev, PTR_ERR(clk), "could not get clk\n");
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0) {
+ ret = host->irq;
goto err;
}
- host->max_clk = clk_get_rate(clk);
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto err;
- host->irq = platform_get_irq(pdev, 0);
- if (host->irq <= 0) {
- ret = -EINVAL;
+ host->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(host->clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(host->clk), "could not get clk\n");
goto err;
}
- ret = mmc_of_parse(mmc);
+ ret = clk_prepare_enable(host->clk);
if (ret)
goto err;
+ host->max_clk = clk_get_rate(host->clk);
+
ret = bcm2835_add_host(host);
if (ret)
- goto err;
+ goto err_clk;
platform_set_drvdata(pdev, host);
@@ -1432,16 +1445,17 @@ static int bcm2835_probe(struct platform_device *pdev)
return 0;
+err_clk:
+ clk_disable_unprepare(host->clk);
err:
dev_dbg(dev, "%s -> err %d\n", __func__, ret);
if (host->dma_chan_rxtx)
dma_release_channel(host->dma_chan_rxtx);
- mmc_free_host(mmc);
return ret;
}
-static int bcm2835_remove(struct platform_device *pdev)
+static void bcm2835_remove(struct platform_device *pdev)
{
struct bcm2835_host *host = platform_get_drvdata(pdev);
struct mmc_host *mmc = mmc_from_priv(host);
@@ -1455,12 +1469,10 @@ static int bcm2835_remove(struct platform_device *pdev)
cancel_work_sync(&host->dma_work);
cancel_delayed_work_sync(&host->timeout_work);
+ clk_disable_unprepare(host->clk);
+
if (host->dma_chan_rxtx)
dma_release_channel(host->dma_chan_rxtx);
-
- mmc_free_host(mmc);
-
- return 0;
}
static const struct of_device_id bcm2835_match[] = {
@@ -1476,6 +1488,7 @@ static struct platform_driver bcm2835_driver = {
.name = "sdhost-bcm2835",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = bcm2835_match,
+ .pm = pm_ptr(&bcm2835_pm_ops),
},
};
module_platform_driver(bcm2835_driver);
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index 2c4b2df52adb..0592f356b1e5 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -13,7 +13,9 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/octeon/octeon.h>
#include "cavium.h"
@@ -215,7 +217,7 @@ static int octeon_mmc_probe(struct platform_device *pdev)
return PTR_ERR(base);
host->dma_base = base;
/*
- * To keep the register addresses shared we intentionaly use
+ * To keep the register addresses shared we intentionally use
* a negative offset here, first register used on Octeon therefore
* starts at 0x20 (MIO_EMM_DMA_CFG).
*/
@@ -277,6 +279,7 @@ static int octeon_mmc_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Error populating slots\n");
octeon_mmc_set_shared_power(host, 0);
+ of_node_put(cn);
goto error;
}
i++;
@@ -293,7 +296,7 @@ error:
return ret;
}
-static int octeon_mmc_remove(struct platform_device *pdev)
+static void octeon_mmc_remove(struct platform_device *pdev)
{
struct cvm_mmc_host *host = platform_get_drvdata(pdev);
u64 dma_cfg;
@@ -308,7 +311,6 @@ static int octeon_mmc_remove(struct platform_device *pdev)
writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
octeon_mmc_set_shared_power(host, 0);
- return 0;
}
static const struct of_device_id octeon_mmc_match[] = {
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index 76013bbbcff3..1373deb3f531 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pci.h>
#include "cavium.h"
@@ -71,7 +72,7 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
if (ret)
return ret;
- ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ ret = pcim_request_all_regions(pdev, KBUILD_MODNAME);
if (ret)
return ret;
@@ -142,8 +143,10 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
continue;
ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
- if (ret)
+ if (ret) {
+ of_node_put(child_node);
goto error;
+ }
}
i++;
}
@@ -161,7 +164,6 @@ error:
}
}
clk_disable_unprepare(host->clk);
- pci_release_regions(pdev);
return ret;
}
@@ -180,7 +182,6 @@ static void thunder_mmc_remove(struct pci_dev *pdev)
writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
clk_disable_unprepare(host->clk);
- pci_release_regions(pdev);
}
static const struct pci_device_id thunder_mmc_id_table[] = {
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index 95a41983c6c0..9a55db0e657c 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -1012,7 +1012,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
struct mmc_host *mmc;
int ret, id;
- mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
+ mmc = devm_mmc_alloc_host(dev, sizeof(*slot));
if (!mmc)
return -ENOMEM;
@@ -1022,7 +1022,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
ret = cvm_mmc_of_parse(dev, slot);
if (ret < 0)
- goto error;
+ return ret;
id = ret;
/* Set up host parameters */
@@ -1066,12 +1066,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
if (ret) {
dev_err(dev, "mmc_add_host() returned %d\n", ret);
slot->host->slot[id] = NULL;
- goto error;
}
- return 0;
-
-error:
- mmc_free_host(slot->mmc);
return ret;
}
@@ -1079,6 +1074,5 @@ int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot)
{
mmc_remove_host(slot->mmc);
slot->host->slot[slot->bus_id] = NULL;
- mmc_free_host(slot->mmc);
return 0;
}
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 6d623b2681c3..31daec787495 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/string_choices.h>
#include "cb710-mmc.h"
#define CB710_MMC_REQ_TIMEOUT_MS 2000
@@ -215,7 +216,7 @@ static void cb710_mmc_set_transfer_size(struct cb710_slot *slot,
((count - 1) << 16)|(blocksize - 1));
dev_vdbg(cb710_slot_dev(slot), "set up for %zu block%s of %zu bytes\n",
- count, count == 1 ? "" : "s", blocksize);
+ count, str_plural(count), blocksize);
}
static void cb710_mmc_fifo_hack(struct cb710_slot *slot)
@@ -493,7 +494,7 @@ static void cb710_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
if (!cb710_mmc_command(mmc, mrq->cmd) && mrq->stop)
cb710_mmc_command(mmc, mrq->stop);
- tasklet_schedule(&reader->finish_req_tasklet);
+ queue_work(system_bh_wq, &reader->finish_req_bh_work);
}
static int cb710_mmc_powerup(struct cb710_slot *slot)
@@ -646,10 +647,10 @@ static int cb710_mmc_irq_handler(struct cb710_slot *slot)
return 1;
}
-static void cb710_mmc_finish_request_tasklet(struct tasklet_struct *t)
+static void cb710_mmc_finish_request_bh_work(struct work_struct *t)
{
- struct cb710_mmc_reader *reader = from_tasklet(reader, t,
- finish_req_tasklet);
+ struct cb710_mmc_reader *reader = from_work(reader, t,
+ finish_req_bh_work);
struct mmc_request *mrq = reader->mrq;
reader->mrq = NULL;
@@ -663,25 +664,25 @@ static const struct mmc_host_ops cb710_mmc_host = {
.get_cd = cb710_mmc_get_cd,
};
-#ifdef CONFIG_PM
-
-static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
+static int cb710_mmc_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
cb710_mmc_enable_irq(slot, 0, ~0);
return 0;
}
-static int cb710_mmc_resume(struct platform_device *pdev)
+static int cb710_mmc_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
cb710_mmc_enable_irq(slot, 0, ~0);
return 0;
}
-#endif /* CONFIG_PM */
+static DEFINE_SIMPLE_DEV_PM_OPS(cb710_mmc_pmops, cb710_mmc_suspend, cb710_mmc_resume);
static int cb710_mmc_init(struct platform_device *pdev)
{
@@ -692,7 +693,7 @@ static int cb710_mmc_init(struct platform_device *pdev)
int err;
u32 val;
- mmc = mmc_alloc_host(sizeof(*reader), cb710_slot_dev(slot));
+ mmc = devm_mmc_alloc_host(cb710_slot_dev(slot), sizeof(*reader));
if (!mmc)
return -ENOMEM;
@@ -718,8 +719,8 @@ static int cb710_mmc_init(struct platform_device *pdev)
reader = mmc_priv(mmc);
- tasklet_setup(&reader->finish_req_tasklet,
- cb710_mmc_finish_request_tasklet);
+ INIT_WORK(&reader->finish_req_bh_work,
+ cb710_mmc_finish_request_bh_work);
spin_lock_init(&reader->irq_lock);
cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
@@ -741,11 +742,10 @@ err_free_mmc:
dev_dbg(cb710_slot_dev(slot), "mmc_add_host() failed: %d\n", err);
cb710_set_irq_handler(slot, NULL);
- mmc_free_host(mmc);
return err;
}
-static int cb710_mmc_exit(struct platform_device *pdev)
+static void cb710_mmc_exit(struct platform_device *pdev)
{
struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
struct mmc_host *mmc = cb710_slot_to_mmc(slot);
@@ -763,20 +763,16 @@ static int cb710_mmc_exit(struct platform_device *pdev)
cb710_write_port_32(slot, CB710_MMC_CONFIG_PORT, 0);
cb710_write_port_16(slot, CB710_MMC_CONFIGB_PORT, 0);
- tasklet_kill(&reader->finish_req_tasklet);
-
- mmc_free_host(mmc);
- return 0;
+ cancel_work_sync(&reader->finish_req_bh_work);
}
static struct platform_driver cb710_mmc_driver = {
- .driver.name = "cb710-mmc",
+ .driver = {
+ .name = "cb710-mmc",
+ .pm = pm_sleep_ptr(&cb710_mmc_pmops),
+ },
.probe = cb710_mmc_init,
.remove = cb710_mmc_exit,
-#ifdef CONFIG_PM
- .suspend = cb710_mmc_suspend,
- .resume = cb710_mmc_resume,
-#endif
};
module_platform_driver(cb710_mmc_driver);
diff --git a/drivers/mmc/host/cb710-mmc.h b/drivers/mmc/host/cb710-mmc.h
index 5e053077dbed..59abaccaad10 100644
--- a/drivers/mmc/host/cb710-mmc.h
+++ b/drivers/mmc/host/cb710-mmc.h
@@ -8,10 +8,11 @@
#define LINUX_CB710_MMC_H
#include <linux/cb710.h>
+#include <linux/workqueue.h>
/* per-MMC-reader structure */
struct cb710_mmc_reader {
- struct tasklet_struct finish_req_tasklet;
+ struct work_struct finish_req_bh_work;
struct mmc_request *mrq;
spinlock_t irq_lock;
unsigned char last_power_mode;
diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
index 38559a956330..178277d90c31 100644
--- a/drivers/mmc/host/cqhci-core.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -33,6 +33,11 @@ struct cqhci_slot {
#define CQHCI_HOST_OTHER BIT(4)
};
+static bool cqhci_halted(struct cqhci_host *cq_host)
+{
+ return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
+}
+
static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
return cq_host->desc_base + (tag * cq_host->slot_sz);
@@ -282,6 +287,9 @@ static void __cqhci_enable(struct cqhci_host *cq_host)
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ if (cqhci_halted(cq_host))
+ cqhci_writel(cq_host, 0, CQHCI_CTL);
+
mmc->cqe_on = true;
if (cq_host->ops->enable)
@@ -471,8 +479,8 @@ static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
return sg_count;
}
-static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
- bool dma64)
+void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
+ bool dma64)
{
__le32 *attr = (__le32 __force *)desc;
@@ -492,6 +500,7 @@ static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
dataddr[0] = cpu_to_le32(addr);
}
}
+EXPORT_SYMBOL(cqhci_set_tran_desc);
static int cqhci_prep_tran_desc(struct mmc_request *mrq,
struct cqhci_host *cq_host, int tag)
@@ -519,7 +528,11 @@ static int cqhci_prep_tran_desc(struct mmc_request *mrq,
if ((i+1) == sg_count)
end = true;
- cqhci_set_tran_desc(desc, addr, len, end, dma64);
+ if (cq_host->ops->set_tran_desc)
+ cq_host->ops->set_tran_desc(cq_host, &desc, addr, len, end, dma64);
+ else
+ cqhci_set_tran_desc(desc, addr, len, end, dma64);
+
desc += cq_host->trans_desc_len;
}
@@ -609,7 +622,7 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
cqhci_writel(cq_host, 0, CQHCI_CTL);
mmc->cqe_on = true;
pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
- if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) {
+ if (cqhci_halted(cq_host)) {
pr_err("%s: cqhci: CQE failed to exit halt state\n",
mmc_hostname(mmc));
}
@@ -819,8 +832,15 @@ irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);
if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
- cmd_error || data_error)
+ cmd_error || data_error) {
+ if (status & CQHCI_IS_RED)
+ mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_RED);
+ if (status & CQHCI_IS_GCE)
+ mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_GCE);
+ if (status & CQHCI_IS_ICCE)
+ mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_ICCE);
cqhci_error_irq(mmc, status, cmd_error, data_error);
+ }
if (status & CQHCI_IS_TCC) {
/* read TCN and complete the request */
@@ -899,8 +919,8 @@ static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
spin_unlock_irqrestore(&cq_host->lock, flags);
if (timed_out) {
- pr_err("%s: cqhci: timeout for tag %d\n",
- mmc_hostname(mmc), tag);
+ pr_err("%s: cqhci: timeout for tag %d, qcnt %d\n",
+ mmc_hostname(mmc), tag, cq_host->qcnt);
cqhci_dumpregs(cq_host);
}
@@ -932,17 +952,12 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
ret = cqhci_tasks_cleared(cq_host);
if (!ret)
- pr_debug("%s: cqhci: Failed to clear tasks\n",
- mmc_hostname(mmc));
+ pr_warn("%s: cqhci: Failed to clear tasks\n",
+ mmc_hostname(mmc));
return ret;
}
-static bool cqhci_halted(struct cqhci_host *cq_host)
-{
- return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
-}
-
static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
struct cqhci_host *cq_host = mmc->cqe_private;
@@ -966,7 +981,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
ret = cqhci_halted(cq_host);
if (!ret)
- pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
+ pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
return ret;
}
@@ -974,10 +989,10 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
/*
* After halting we expect to be able to use the command line. We interpret the
* failure to halt to mean the data lines might still be in use (and the upper
- * layers will need to send a STOP command), so we set the timeout based on a
- * generous command timeout.
+ * layers will need to send a STOP command); however, failing to halt complicates
+ * the recovery, so set a timeout that would reasonably allow I/O to complete.
*/
-#define CQHCI_START_HALT_TIMEOUT 5
+#define CQHCI_START_HALT_TIMEOUT 500
static void cqhci_recovery_start(struct mmc_host *mmc)
{
@@ -1065,28 +1080,28 @@ static void cqhci_recovery_finish(struct mmc_host *mmc)
ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
- if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
- ok = false;
-
/*
* The specification contradicts itself, by saying that tasks cannot be
* cleared if CQHCI does not halt, but if CQHCI does not halt, it should
* be disabled/re-enabled, but not to disable before clearing tasks.
* Have a go anyway.
*/
- if (!ok) {
- pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
- cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
- cqcfg &= ~CQHCI_ENABLE;
- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
- cqcfg |= CQHCI_ENABLE;
- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
- /* Be sure that there are no tasks */
- ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
- if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
- ok = false;
- WARN_ON(!ok);
- }
+ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+ ok = false;
+
+ /* Disable to make sure tasks really are cleared */
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg |= CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+
+ if (!ok)
+ cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);
cqhci_recover_mrqs(cq_host);
diff --git a/drivers/mmc/host/cqhci-crypto.c b/drivers/mmc/host/cqhci-crypto.c
index 6419cfbb4ab7..5a467098a0d6 100644
--- a/drivers/mmc/host/cqhci-crypto.c
+++ b/drivers/mmc/host/cqhci-crypto.c
@@ -6,7 +6,7 @@
*/
#include <linux/blk-crypto.h>
-#include <linux/keyslot-manager.h>
+#include <linux/blk-crypto-profile.h>
#include <linux/mmc/host.h>
#include "cqhci-crypto.h"
@@ -23,23 +23,18 @@ static const struct cqhci_crypto_alg_entry {
};
static inline struct cqhci_host *
-cqhci_host_from_ksm(struct blk_keyslot_manager *ksm)
+cqhci_host_from_crypto_profile(struct blk_crypto_profile *profile)
{
- struct mmc_host *mmc = container_of(ksm, struct mmc_host, ksm);
-
- return mmc->cqe_private;
+ return mmc_from_crypto_profile(profile)->cqe_private;
}
-static int cqhci_crypto_program_key(struct cqhci_host *cq_host,
- const union cqhci_crypto_cfg_entry *cfg,
- int slot)
+static void cqhci_crypto_program_key(struct cqhci_host *cq_host,
+ const union cqhci_crypto_cfg_entry *cfg,
+ int slot)
{
u32 slot_offset = cq_host->crypto_cfg_register + slot * sizeof(*cfg);
int i;
- if (cq_host->ops->program_key)
- return cq_host->ops->program_key(cq_host, cfg, slot);
-
/* Clear CFGE */
cqhci_writel(cq_host, 0, slot_offset + 16 * sizeof(cfg->reg_val[0]));
@@ -54,15 +49,14 @@ static int cqhci_crypto_program_key(struct cqhci_host *cq_host,
/* Write dword 16, which includes the new value of CFGE */
cqhci_writel(cq_host, le32_to_cpu(cfg->reg_val[16]),
slot_offset + 16 * sizeof(cfg->reg_val[0]));
- return 0;
}
-static int cqhci_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
+static int cqhci_crypto_keyslot_program(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct cqhci_host *cq_host = cqhci_host_from_ksm(ksm);
+ struct cqhci_host *cq_host = cqhci_host_from_crypto_profile(profile);
const union cqhci_crypto_cap_entry *ccap_array =
cq_host->crypto_cap_array;
const struct cqhci_crypto_alg_entry *alg =
@@ -71,7 +65,6 @@ static int cqhci_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
int i;
int cap_idx = -1;
union cqhci_crypto_cfg_entry cfg = {};
- int err;
BUILD_BUG_ON(CQHCI_CRYPTO_KEY_SIZE_INVALID != 0);
for (i = 0; i < cq_host->crypto_capabilities.num_crypto_cap; i++) {
@@ -91,17 +84,17 @@ static int cqhci_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
if (ccap_array[cap_idx].algorithm_id == CQHCI_CRYPTO_ALG_AES_XTS) {
/* In XTS mode, the blk_crypto_key's size is already doubled */
- memcpy(cfg.crypto_key, key->raw, key->size/2);
+ memcpy(cfg.crypto_key, key->bytes, key->size/2);
memcpy(cfg.crypto_key + CQHCI_CRYPTO_KEY_MAX_SIZE/2,
- key->raw + key->size/2, key->size/2);
+ key->bytes + key->size/2, key->size/2);
} else {
- memcpy(cfg.crypto_key, key->raw, key->size);
+ memcpy(cfg.crypto_key, key->bytes, key->size);
}
- err = cqhci_crypto_program_key(cq_host, &cfg, slot);
+ cqhci_crypto_program_key(cq_host, &cfg, slot);
memzero_explicit(&cfg, sizeof(cfg));
- return err;
+ return 0;
}
static int cqhci_crypto_clear_keyslot(struct cqhci_host *cq_host, int slot)
@@ -112,14 +105,15 @@ static int cqhci_crypto_clear_keyslot(struct cqhci_host *cq_host, int slot)
*/
union cqhci_crypto_cfg_entry cfg = {};
- return cqhci_crypto_program_key(cq_host, &cfg, slot);
+ cqhci_crypto_program_key(cq_host, &cfg, slot);
+ return 0;
}
-static int cqhci_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
+static int cqhci_crypto_keyslot_evict(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct cqhci_host *cq_host = cqhci_host_from_ksm(ksm);
+ struct cqhci_host *cq_host = cqhci_host_from_crypto_profile(profile);
return cqhci_crypto_clear_keyslot(cq_host, slot);
}
@@ -132,7 +126,7 @@ static int cqhci_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
* "enabled" when these are called, i.e. CQHCI_ENABLE might not be set in the
* CQHCI_CFG register. But the hardware allows that.
*/
-static const struct blk_ksm_ll_ops cqhci_ksm_ops = {
+static const struct blk_crypto_ll_ops cqhci_crypto_ops = {
.keyslot_program = cqhci_crypto_keyslot_program,
.keyslot_evict = cqhci_crypto_keyslot_evict,
};
@@ -157,8 +151,8 @@ cqhci_find_blk_crypto_mode(union cqhci_crypto_cap_entry cap)
*
* If the driver previously set MMC_CAP2_CRYPTO and the CQE declares
* CQHCI_CAP_CS, initialize the crypto support. This involves reading the
- * crypto capability registers, initializing the keyslot manager, clearing all
- * keyslots, and enabling 128-bit task descriptors.
+ * crypto capability registers, initializing the blk_crypto_profile, clearing
+ * all keyslots, and enabling 128-bit task descriptors.
*
* Return: 0 if crypto was initialized or isn't supported; whether
* MMC_CAP2_CRYPTO remains set indicates which one of those cases it is.
@@ -168,8 +162,7 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
{
struct mmc_host *mmc = cq_host->mmc;
struct device *dev = mmc_dev(mmc);
- struct blk_keyslot_manager *ksm = &mmc->ksm;
- unsigned int num_keyslots;
+ struct blk_crypto_profile *profile = &mmc->crypto_profile;
unsigned int cap_idx;
enum blk_crypto_mode_num blk_mode_num;
unsigned int slot;
@@ -179,6 +172,9 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
goto out;
+ if (cq_host->ops->uses_custom_crypto_profile)
+ goto profile_initialized;
+
cq_host->crypto_capabilities.reg_val =
cpu_to_le32(cqhci_readl(cq_host, CQHCI_CCAP));
@@ -197,17 +193,18 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
* CCAP.CFGC is off by one, so the actual number of crypto
* configurations (a.k.a. keyslots) is CCAP.CFGC + 1.
*/
- num_keyslots = cq_host->crypto_capabilities.config_count + 1;
-
- err = devm_blk_ksm_init(dev, ksm, num_keyslots);
+ err = devm_blk_crypto_profile_init(
+ dev, profile, cq_host->crypto_capabilities.config_count + 1);
if (err)
goto out;
- ksm->ksm_ll_ops = cqhci_ksm_ops;
- ksm->dev = dev;
+ profile->ll_ops = cqhci_crypto_ops;
+ profile->dev = dev;
/* Unfortunately, CQHCI crypto only supports 32 DUN bits. */
- ksm->max_dun_bytes_supported = 4;
+ profile->max_dun_bytes_supported = 4;
+
+ profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
/*
* Cache all the crypto capabilities and advertise the supported crypto
@@ -223,13 +220,15 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
cq_host->crypto_cap_array[cap_idx]);
if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID)
continue;
- ksm->crypto_modes_supported[blk_mode_num] |=
+ profile->modes_supported[blk_mode_num] |=
cq_host->crypto_cap_array[cap_idx].sdus_mask * 512;
}
+profile_initialized:
+
/* Clear all the keyslots so that we start in a known state. */
- for (slot = 0; slot < num_keyslots; slot++)
- cqhci_crypto_clear_keyslot(cq_host, slot);
+ for (slot = 0; slot < profile->num_slots; slot++)
+ profile->ll_ops.keyslot_evict(profile, NULL, slot);
/* CQHCI crypto requires the use of 128-bit task descriptors. */
cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
diff --git a/drivers/mmc/host/cqhci-crypto.h b/drivers/mmc/host/cqhci-crypto.h
index 60b58ee0e625..d7fb084f563b 100644
--- a/drivers/mmc/host/cqhci-crypto.h
+++ b/drivers/mmc/host/cqhci-crypto.h
@@ -22,12 +22,15 @@ int cqhci_crypto_init(struct cqhci_host *host);
*/
static inline u64 cqhci_crypto_prep_task_desc(struct mmc_request *mrq)
{
- if (!mrq->crypto_enabled)
+ if (!mrq->crypto_ctx)
return 0;
+ /* We set max_dun_bytes_supported=4, so all DUNs should be 32-bit. */
+ WARN_ON_ONCE(mrq->crypto_ctx->bc_dun[0] > U32_MAX);
+
return CQHCI_CRYPTO_ENABLE_BIT |
CQHCI_CRYPTO_KEYSLOT(mrq->crypto_key_slot) |
- mrq->data_unit_num;
+ mrq->crypto_ctx->bc_dun[0];
}
#else /* CONFIG_MMC_CRYPTO */
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
index ba9387ed90eb..3668856531c1 100644
--- a/drivers/mmc/host/cqhci.h
+++ b/drivers/mmc/host/cqhci.h
@@ -5,6 +5,7 @@
#define LINUX_MMC_CQHCI_H
#include <linux/compiler.h>
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
@@ -23,6 +24,8 @@
/* capabilities */
#define CQHCI_CAP 0x04
#define CQHCI_CAP_CS 0x10000000 /* Crypto Support */
+#define CQHCI_CAP_ITCFMUL GENMASK(15, 12)
+#define CQHCI_ITCFMUL(x) FIELD_GET(CQHCI_CAP_ITCFMUL, (x))
/* configuration */
#define CQHCI_CFG 0x08
@@ -90,6 +93,7 @@
/* send status config 1 */
#define CQHCI_SSC1 0x40
#define CQHCI_SSC1_CBC_MASK GENMASK(19, 16)
+#define CQHCI_SSC1_CIT_MASK GENMASK(15, 0)
/* send status config 2 */
#define CQHCI_SSC2 0x44
@@ -286,9 +290,10 @@ struct cqhci_host_ops {
u64 *data);
void (*pre_enable)(struct mmc_host *mmc);
void (*post_disable)(struct mmc_host *mmc);
+ void (*set_tran_desc)(struct cqhci_host *cq_host, u8 **desc,
+ dma_addr_t addr, int len, bool end, bool dma64);
#ifdef CONFIG_MMC_CRYPTO
- int (*program_key)(struct cqhci_host *cq_host,
- const union cqhci_crypto_cfg_entry *cfg, int slot);
+ bool uses_custom_crypto_profile;
#endif
};
@@ -315,6 +320,7 @@ irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, bool dma64);
struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev);
int cqhci_deactivate(struct mmc_host *mmc);
+void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end, bool dma64);
static inline int cqhci_suspend(struct mmc_host *mmc)
{
return cqhci_deactivate(mmc);
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 2a757c88f9d2..42b0118a45a8 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -7,25 +7,23 @@
* Copyright (C) 2009 David Brownell
*/
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/err.h>
#include <linux/cpufreq.h>
-#include <linux/mmc/host.h>
-#include <linux/io.h>
-#include <linux/irq.h>
#include <linux/delay.h>
-#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mmc/slot-gpio.h>
-#include <linux/interrupt.h>
-
+#include <linux/module.h>
#include <linux/platform_data/mmc-davinci.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
/*
* Register Definitions
@@ -147,17 +145,17 @@
#define MAX_NR_SG 16
static unsigned rw_threshold = 32;
-module_param(rw_threshold, uint, S_IRUGO);
+module_param(rw_threshold, uint, 0444);
MODULE_PARM_DESC(rw_threshold,
"Read/Write threshold. Default = 32");
static unsigned poll_threshold = 128;
-module_param(poll_threshold, uint, S_IRUGO);
+module_param(poll_threshold, uint, 0444);
MODULE_PARM_DESC(poll_threshold,
"Polling transaction size threshold. Default = 128");
static unsigned poll_loopcount = 32;
-module_param(poll_loopcount, uint, S_IRUGO);
+module_param(poll_loopcount, uint, 0444);
MODULE_PARM_DESC(poll_loopcount,
"Maximum polling loop count. Default = 32");
@@ -181,12 +179,6 @@ struct mmc_davinci_host {
#define DAVINCI_MMC_DATADIR_WRITE 2
unsigned char data_dir;
- /* buffer is used during PIO of one scatterlist segment, and
- * is updated along with buffer_bytes_left. bytes_left applies
- * to all N blocks of the PIO transfer.
- */
- u8 *buffer;
- u32 buffer_bytes_left;
u32 bytes_left;
struct dma_chan *dma_tx;
@@ -197,8 +189,8 @@ struct mmc_davinci_host {
bool active_request;
/* For PIO we walk scatterlists one segment at a time. */
+ struct sg_mapping_iter sg_miter;
unsigned int sg_len;
- struct scatterlist *sg;
/* Version of the MMC/SD controller */
u8 version;
@@ -214,30 +206,25 @@ struct mmc_davinci_host {
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);
/* PIO only */
-static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
-{
- host->buffer_bytes_left = sg_dma_len(host->sg);
- host->buffer = sg_virt(host->sg);
- if (host->buffer_bytes_left > host->bytes_left)
- host->buffer_bytes_left = host->bytes_left;
-}
-
static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
unsigned int n)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
u8 *p;
unsigned int i;
- if (host->buffer_bytes_left == 0) {
- host->sg = sg_next(host->data->sg);
- mmc_davinci_sg_to_buf(host);
+ /*
+	 * Because sgm->consumed is adjusted below, sg_miter_next() returns
+	 * a pointer to the current position in the scatterlist.
+ */
+ if (!sg_miter_next(sgm)) {
+ dev_err(mmc_dev(host->mmc), "ran out of sglist prematurely\n");
+ return;
}
+ p = sgm->addr;
- p = host->buffer;
- if (n > host->buffer_bytes_left)
- n = host->buffer_bytes_left;
- host->buffer_bytes_left -= n;
- host->bytes_left -= n;
+ if (n > sgm->length)
+ n = sgm->length;
/* NOTE: we never transfer more than rw_threshold bytes
* to/from the fifo here; there's no I/O overlap.
@@ -262,7 +249,9 @@ static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
p = p + (n & 3);
}
}
- host->buffer = p;
+
+ sgm->consumed = n;
+ host->bytes_left -= n;
}
static void mmc_davinci_start_command(struct mmc_davinci_host *host,
@@ -518,6 +507,7 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
int timeout;
struct mmc_data *data = req->data;
+ unsigned int flags = SG_MITER_ATOMIC; /* Used from IRQ */
if (host->version == MMC_CTLR_VERSION_2)
fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;
@@ -546,12 +536,14 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
/* Configure the FIFO */
if (data->flags & MMC_DATA_WRITE) {
+ flags |= SG_MITER_FROM_SG;
host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
host->base + DAVINCI_MMCFIFOCTL);
writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
host->base + DAVINCI_MMCFIFOCTL);
} else {
+ flags |= SG_MITER_TO_SG;
host->data_dir = DAVINCI_MMC_DATADIR_READ;
writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
host->base + DAVINCI_MMCFIFOCTL);
@@ -559,7 +551,6 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
host->base + DAVINCI_MMCFIFOCTL);
}
- host->buffer = NULL;
host->bytes_left = data->blocks * data->blksz;
/* For now we try to use DMA whenever we won't need partial FIFO
@@ -577,8 +568,7 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
} else {
/* Revert to CPU Copy */
host->sg_len = data->sg_len;
- host->sg = host->data->sg;
- mmc_davinci_sg_to_buf(host);
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
}
@@ -598,7 +588,7 @@ static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
cpu_relax();
}
if (mmcst1 & MMCST1_BUSY) {
- dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n");
+ dev_err(mmc_dev(host->mmc), "still BUSY? bad ...\n");
req->cmd->error = -ETIMEDOUT;
mmc_request_done(mmc, req);
return;
@@ -844,6 +834,8 @@ davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
{
mmc_davinci_reset_ctrl(host, 1);
mmc_davinci_reset_ctrl(host, 0);
+ if (!host->do_dma)
+ sg_miter_stop(&host->sg_miter);
}
static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
@@ -920,11 +912,13 @@ static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
if (qstatus & MMCST0_DATDNE) {
/* All blocks sent/received, and CRC checks passed */
if (data != NULL) {
- if ((host->do_dma == 0) && (host->bytes_left > 0)) {
- /* if datasize < rw_threshold
- * no RX ints are generated
- */
- davinci_fifo_data_trans(host, host->bytes_left);
+ if (!host->do_dma) {
+ if (host->bytes_left > 0)
+ /* if datasize < rw_threshold
+ * no RX ints are generated
+ */
+ davinci_fifo_data_trans(host, host->bytes_left);
+ sg_miter_stop(&host->sg_miter);
}
end_transfer = 1;
data->bytes_xfered = data->blocks * data->blksz;
@@ -1189,11 +1183,10 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
static int davinci_mmcsd_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct mmc_davinci_host *host = NULL;
struct mmc_host *mmc = NULL;
struct resource *r, *mem = NULL;
- int ret, irq;
+ int ret, irq, bus_width;
size_t mem_size;
const struct platform_device_id *id_entry;
@@ -1210,7 +1203,7 @@ static int davinci_mmcsd_probe(struct platform_device *pdev)
if (!mem)
return -EBUSY;
- mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host));
if (!mmc)
return -ENOMEM;
@@ -1219,25 +1212,21 @@ static int davinci_mmcsd_probe(struct platform_device *pdev)
host->mem_res = mem;
host->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
- if (!host->base) {
- ret = -ENOMEM;
- goto ioremap_fail;
- }
+ if (!host->base)
+ return -ENOMEM;
host->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(host->clk)) {
- ret = PTR_ERR(host->clk);
- goto clk_get_fail;
- }
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
+
ret = clk_prepare_enable(host->clk);
if (ret)
- goto clk_prepare_enable_fail;
+ return ret;
host->mmc_input_clk = clk_get_rate(host->clk);
- match = of_match_device(davinci_mmc_dt_ids, &pdev->dev);
- if (match) {
- pdev->id_entry = match->data;
+ pdev->id_entry = device_get_match_data(&pdev->dev);
+ if (pdev->id_entry) {
ret = mmc_of_parse(mmc);
if (ret) {
dev_err_probe(&pdev->dev, ret,
@@ -1259,7 +1248,7 @@ static int davinci_mmcsd_probe(struct platform_device *pdev)
host->use_dma = use_dma;
host->mmc_irq = irq;
- host->sdio_irq = platform_get_irq(pdev, 1);
+ host->sdio_irq = platform_get_irq_optional(pdev, 1);
if (host->use_dma) {
ret = davinci_acquire_dma_channels(host);
@@ -1324,9 +1313,14 @@ static int davinci_mmcsd_probe(struct platform_device *pdev)
rename_region(mem, mmc_hostname(mmc));
+ if (mmc->caps & MMC_CAP_8_BIT_DATA)
+ bus_width = 8;
+ else if (mmc->caps & MMC_CAP_4_BIT_DATA)
+ bus_width = 4;
+ else
+ bus_width = 1;
dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
- host->use_dma ? "DMA" : "PIO",
- (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
+ host->use_dma ? "DMA" : "PIO", bus_width);
return 0;
@@ -1339,15 +1333,11 @@ cpu_freq_fail:
parse_fail:
dma_probe_defer:
clk_disable_unprepare(host->clk);
-clk_prepare_enable_fail:
-clk_get_fail:
-ioremap_fail:
- mmc_free_host(mmc);
return ret;
}
-static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
+static void davinci_mmcsd_remove(struct platform_device *pdev)
{
struct mmc_davinci_host *host = platform_get_drvdata(pdev);
@@ -1355,12 +1345,8 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
mmc_davinci_cpufreq_deregister(host);
davinci_release_dma_channels(host);
clk_disable_unprepare(host->clk);
- mmc_free_host(host->mmc);
-
- return 0;
}
-#ifdef CONFIG_PM
static int davinci_mmcsd_suspend(struct device *dev)
{
struct mmc_davinci_host *host = dev_get_drvdata(dev);
@@ -1375,32 +1361,29 @@ static int davinci_mmcsd_suspend(struct device *dev)
static int davinci_mmcsd_resume(struct device *dev)
{
struct mmc_davinci_host *host = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(host->clk);
+ if (ret)
+ return ret;
- clk_enable(host->clk);
mmc_davinci_reset_ctrl(host, 0);
return 0;
}
-static const struct dev_pm_ops davinci_mmcsd_pm = {
- .suspend = davinci_mmcsd_suspend,
- .resume = davinci_mmcsd_resume,
-};
-
-#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
-#else
-#define davinci_mmcsd_pm_ops NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(davinci_mmcsd_pm_ops,
+ davinci_mmcsd_suspend, davinci_mmcsd_resume);
static struct platform_driver davinci_mmcsd_driver = {
.driver = {
.name = "davinci_mmc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = davinci_mmcsd_pm_ops,
+ .pm = pm_sleep_ptr(&davinci_mmcsd_pm_ops),
.of_match_table = davinci_mmc_dt_ids,
},
.probe = davinci_mmcsd_probe,
- .remove = __exit_p(davinci_mmcsd_remove),
+ .remove = davinci_mmcsd_remove,
.id_table = davinci_mmc_devtype,
};
diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c
index 10baf122bc15..3cf526ab0387 100644
--- a/drivers/mmc/host/dw_mmc-bluefield.c
+++ b/drivers/mmc/host/dw_mmc-bluefield.c
@@ -3,6 +3,7 @@
* Copyright (C) 2018 Mellanox Technologies.
*/
+#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/mmc/host.h>
@@ -20,6 +21,9 @@
#define BLUEFIELD_UHS_REG_EXT_SAMPLE 2
#define BLUEFIELD_UHS_REG_EXT_DRIVE 4
+/* SMC call for RST_N */
+#define BLUEFIELD_SMC_SET_EMMC_RST_N 0x82000007
+
static void dw_mci_bluefield_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
u32 reg;
@@ -34,8 +38,20 @@ static void dw_mci_bluefield_set_ios(struct dw_mci *host, struct mmc_ios *ios)
mci_writel(host, UHS_REG_EXT, reg);
}
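+/* eMMC hardware reset (RST_N) is requested from firmware via an SMC call. */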
+static void dw_mci_bluefield_hw_reset(struct dw_mci *host)
+{
+ struct arm_smccc_res res = { 0 };
+
+ arm_smccc_smc(BLUEFIELD_SMC_SET_EMMC_RST_N, 0, 0, 0, 0, 0, 0, 0,
+ &res);
+
+ if (res.a0)
+ pr_err("RST_N failed.\n");
+}
+
static const struct dw_mci_drv_data bluefield_drv_data = {
- .set_ios = dw_mci_bluefield_set_ios
+ .set_ios = dw_mci_bluefield_set_ios,
+ .hw_reset = dw_mci_bluefield_hw_reset
};
static const struct of_device_id dw_mci_bluefield_match[] = {
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 0c75810812a0..384609671a9a 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -11,7 +11,6 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -28,6 +27,9 @@ enum dw_mci_exynos_type {
DW_MCI_TYPE_EXYNOS5420_SMU,
DW_MCI_TYPE_EXYNOS7,
DW_MCI_TYPE_EXYNOS7_SMU,
+ DW_MCI_TYPE_EXYNOS7870,
+ DW_MCI_TYPE_EXYNOS7870_SMU,
+ DW_MCI_TYPE_ARTPEC8,
};
/* Exynos implementation specific driver private data */
@@ -69,6 +71,15 @@ static struct dw_mci_exynos_compatible {
}, {
.compatible = "samsung,exynos7-dw-mshc-smu",
.ctrl_type = DW_MCI_TYPE_EXYNOS7_SMU,
+ }, {
+ .compatible = "samsung,exynos7870-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS7870,
+ }, {
+ .compatible = "samsung,exynos7870-dw-mshc-smu",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS7870_SMU,
+ }, {
+ .compatible = "axis,artpec8-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_ARTPEC8,
},
};
@@ -81,7 +92,10 @@ static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host)
else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210)
return EXYNOS4210_FIXED_CIU_CLK_DIV;
else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL64)) + 1;
else
return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL)) + 1;
@@ -96,7 +110,8 @@ static void dw_mci_exynos_config_smu(struct dw_mci *host)
* set for non-encryption mode at this time.
*/
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU) {
mci_writel(host, MPSBEGIN0, 0);
mci_writel(host, MPSEND0, SDMMC_ENDING_SEC_NR_MAX);
mci_writel(host, MPSCTRL0, SDMMC_MPSCTRL_SECURE_WRITE_BIT |
@@ -122,6 +137,17 @@ static int dw_mci_exynos_priv_init(struct dw_mci *host)
DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl);
}
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU) {
+ /* Exynos7870 has a 64-bit FIFO that is accessed as two 32-bit words */
+ host->quirks |= DW_MMC_QUIRK_FIFO64_32;
+ }
+
+ if (priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) {
+ /* ARTPEC-8 uses the extended data read timeout (TMOUT) encoding */
+ host->quirks |= DW_MMC_QUIRK_EXTENDED_TMOUT;
+ }
+
host->bus_hz /= (priv->ciu_div + 1);
return 0;
@@ -133,7 +159,10 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
u32 clksel;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
clksel = mci_readl(host, CLKSEL);
@@ -141,7 +170,10 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
clksel = (clksel & ~SDMMC_CLKSEL_TIMING_MASK) | timing;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
mci_writel(host, CLKSEL, clksel);
@@ -157,7 +189,6 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags);
}
-#ifdef CONFIG_PM
static int dw_mci_exynos_runtime_resume(struct device *dev)
{
struct dw_mci *host = dev_get_drvdata(dev);
@@ -171,9 +202,7 @@ static int dw_mci_exynos_runtime_resume(struct device *dev)
return ret;
}
-#endif /* CONFIG_PM */
-#ifdef CONFIG_PM_SLEEP
/**
* dw_mci_exynos_suspend_noirq - Exynos-specific suspend code
* @dev: Device to suspend (this device)
@@ -210,14 +239,20 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
return ret;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
clksel = mci_readl(host, CLKSEL);
if (clksel & SDMMC_CLKSEL_WAKEUP_INT) {
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
mci_writel(host, CLKSEL, clksel);
@@ -227,7 +262,6 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
{
@@ -238,7 +272,8 @@ static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
* Not supported to configure register
* related to HS400
*/
- if (priv->ctrl_type < DW_MCI_TYPE_EXYNOS5420) {
+ if ((priv->ctrl_type < DW_MCI_TYPE_EXYNOS5420) ||
+ (priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)) {
if (timing == MMC_TIMING_MMC_HS400)
dev_warn(host->dev,
"cannot configure HS400, unsupported chipset\n");
@@ -394,7 +429,10 @@ static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host)
struct dw_mci_exynos_priv_data *priv = host->priv;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL64));
else
return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL));
@@ -406,13 +444,19 @@ static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
struct dw_mci_exynos_priv_data *priv = host->priv;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
clksel = mci_readl(host, CLKSEL);
clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
mci_writel(host, CLKSEL, clksel);
@@ -425,7 +469,10 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
u8 sample;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
clksel = mci_readl(host, CLKSEL);
@@ -434,7 +481,10 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
mci_writel(host, CLKSEL, clksel);
@@ -442,14 +492,14 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
return sample;
}
-static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates)
+static s8 dw_mci_exynos_get_best_clksmpl(u8 candidates)
{
const u8 iter = 8;
u8 __c;
s8 i, loc = -1;
for (i = 0; i < iter; i++) {
- __c = ror8(candiates, i);
+ __c = ror8(candidates, i);
if ((__c & 0xc7) == 0xc7) {
loc = i;
goto out;
@@ -457,13 +507,25 @@ static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates)
}
for (i = 0; i < iter; i++) {
- __c = ror8(candiates, i);
+ __c = ror8(candidates, i);
if ((__c & 0x83) == 0x83) {
loc = i;
goto out;
}
}
+ /*
+ * If there are no candidate values at all, -EIO is returned.
+ * If there are candidates but no best clock sample value is found,
+ * fall back to the first candidate clock sample value.
+ */
+ for (i = 0; i < iter; i++) {
+ __c = ror8(candidates, i);
+ if ((__c & 0x1) == 0x1) {
+ loc = i;
+ goto out;
+ }
+ }
out:
return loc;
}
@@ -473,7 +535,7 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
struct dw_mci *host = slot->host;
struct dw_mci_exynos_priv_data *priv = host->priv;
struct mmc_host *mmc = slot->mmc;
- u8 start_smpl, smpl, candiates = 0;
+ u8 start_smpl, smpl, candidates = 0;
s8 found;
int ret = 0;
@@ -484,16 +546,18 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
smpl = dw_mci_exynos_move_next_clksmpl(host);
if (!mmc_send_tuning(mmc, opcode, NULL))
- candiates |= (1 << smpl);
+ candidates |= (1 << smpl);
} while (start_smpl != smpl);
- found = dw_mci_exynos_get_best_clksmpl(candiates);
+ found = dw_mci_exynos_get_best_clksmpl(candidates);
if (found >= 0) {
dw_mci_exynos_set_clksmpl(host, found);
priv->tuned_sample = found;
} else {
ret = -EIO;
+ dev_warn(&mmc->class_dev,
+ "No candidate clock sample values found!\n");
}
return ret;
@@ -510,17 +574,65 @@ static int dw_mci_exynos_prepare_hs400_tuning(struct dw_mci *host,
return 0;
}
+static void dw_mci_exynos_set_data_timeout(struct dw_mci *host,
+ unsigned int timeout_ns)
+{
+ u32 clk_div, tmout;
+ u64 tmp;
+ unsigned int tmp2;
+
+ clk_div = (mci_readl(host, CLKDIV) & 0xFF) * 2;
+ if (clk_div == 0)
+ clk_div = 1;
+
+ tmp = DIV_ROUND_UP_ULL((u64)timeout_ns * host->bus_hz, NSEC_PER_SEC);
+ tmp = DIV_ROUND_UP_ULL(tmp, clk_div);
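+ /* tmp is now the data timeout expressed in card clock (bus_hz / clk_div) cycles */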
+
+ /* TMOUT[7:0] (RESPONSE_TIMEOUT) */
+ tmout = 0xFF; /* Set maximum */
+
+ /*
+ * Extended HW timer (max = 0x6FFFFF2):
+ * ((TMOUT[10:8] - 1) * 0xFFFFFF + TMOUT[31:11] * 8)
+ */
+ if (!tmp || tmp > 0x6FFFFF2) {
+ tmout |= (0xFFFFFF << 8);
+ } else {
+ /* TMOUT[10:8] */
+ tmp2 = (((unsigned int)tmp / 0xFFFFFF) + 1) & 0x7;
+ tmout |= tmp2 << 8;
+
+ /* TMOUT[31:11] */
+ tmp = tmp - ((tmp2 - 1) * 0xFFFFFF);
+ tmout |= (tmp & 0xFFFFF8) << 8;
+ }
+
+ mci_writel(host, TMOUT, tmout);
+ dev_dbg(host->dev, "timeout_ns: %u => TMOUT[31:8]: %#08x",
+ timeout_ns, tmout >> 8);
+}
+
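+/*
+ * Undo the encoding done in dw_mci_exynos_set_data_timeout():
+ * (TMOUT[10:8] - 1) whole 0xFFFFFF-clock chunks plus the remainder
+ * stored in TMOUT[31:11] in units of 8 card clocks.
+ */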
+static u32 dw_mci_exynos_get_drto_clks(struct dw_mci *host)
+{
+ u32 drto_clks;
+
+ drto_clks = mci_readl(host, TMOUT) >> 8;
+
+ return (((drto_clks & 0x7) - 1) * 0xFFFFFF) + ((drto_clks & 0xFFFFF8));
+}
+
/* Common capabilities of Exynos4/Exynos5 SoC */
static unsigned long exynos_dwmmc_caps[4] = {
- MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23,
- MMC_CAP_CMD23,
- MMC_CAP_CMD23,
- MMC_CAP_CMD23,
+ MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA,
+ 0,
+ 0,
+ 0,
};
static const struct dw_mci_drv_data exynos_drv_data = {
.caps = exynos_dwmmc_caps,
.num_caps = ARRAY_SIZE(exynos_dwmmc_caps),
+ .common_caps = MMC_CAP_CMD23,
.init = dw_mci_exynos_priv_init,
.set_ios = dw_mci_exynos_set_ios,
.parse_dt = dw_mci_exynos_parse_dt,
@@ -528,6 +640,16 @@ static const struct dw_mci_drv_data exynos_drv_data = {
.prepare_hs400_tuning = dw_mci_exynos_prepare_hs400_tuning,
};
+static const struct dw_mci_drv_data artpec_drv_data = {
+ .common_caps = MMC_CAP_CMD23,
+ .init = dw_mci_exynos_priv_init,
+ .set_ios = dw_mci_exynos_set_ios,
+ .parse_dt = dw_mci_exynos_parse_dt,
+ .execute_tuning = dw_mci_exynos_execute_tuning,
+ .set_data_timeout = dw_mci_exynos_set_data_timeout,
+ .get_drto_clks = dw_mci_exynos_get_drto_clks,
+};
+
static const struct of_device_id dw_mci_exynos_match[] = {
{ .compatible = "samsung,exynos4412-dw-mshc",
.data = &exynos_drv_data, },
@@ -541,6 +663,12 @@ static const struct of_device_id dw_mci_exynos_match[] = {
.data = &exynos_drv_data, },
{ .compatible = "samsung,exynos7-dw-mshc-smu",
.data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos7870-dw-mshc",
+ .data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos7870-dw-mshc-smu",
+ .data = &exynos_drv_data, },
+ { .compatible = "axis,artpec8-dw-mshc",
+ .data = &artpec_drv_data, },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
@@ -570,21 +698,18 @@ static int dw_mci_exynos_probe(struct platform_device *pdev)
return 0;
}
-static int dw_mci_exynos_remove(struct platform_device *pdev)
+static void dw_mci_exynos_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- return dw_mci_pltfm_remove(pdev);
+ dw_mci_pltfm_remove(pdev);
}
static const struct dev_pm_ops dw_mci_exynos_pmops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend_noirq,
- dw_mci_exynos_resume_noirq)
- SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
- dw_mci_exynos_runtime_resume,
- NULL)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend_noirq, dw_mci_exynos_resume_noirq)
+ RUNTIME_PM_OPS(dw_mci_runtime_suspend, dw_mci_exynos_runtime_resume, NULL)
};
static struct platform_driver dw_mci_exynos_pltfm_driver = {
@@ -594,7 +719,7 @@ static struct platform_driver dw_mci_exynos_pltfm_driver = {
.name = "dwmmc_exynos",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_exynos_match,
- .pm = &dw_mci_exynos_pmops,
+ .pm = pm_ptr(&dw_mci_exynos_pmops),
},
};
diff --git a/drivers/mmc/host/dw_mmc-hi3798cv200.c b/drivers/mmc/host/dw_mmc-hi3798cv200.c
index 39794f93826f..0ccfae1b2dbe 100644
--- a/drivers/mmc/host/dw_mmc-hi3798cv200.c
+++ b/drivers/mmc/host/dw_mmc-hi3798cv200.c
@@ -23,12 +23,6 @@ struct hi3798cv200_priv {
struct clk *drive_clk;
};
-static unsigned long dw_mci_hi3798cv200_caps[] = {
- MMC_CAP_CMD23,
- MMC_CAP_CMD23,
- MMC_CAP_CMD23
-};
-
static void dw_mci_hi3798cv200_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
struct hi3798cv200_priv *priv = host->priv;
@@ -93,7 +87,6 @@ static int dw_mci_hi3798cv200_execute_tuning(struct dw_mci_slot *slot,
goto tuning_out;
prev_err = err;
- err = 0;
}
tuning_out:
@@ -166,8 +159,7 @@ disable_sample_clk:
}
static const struct dw_mci_drv_data hi3798cv200_data = {
- .caps = dw_mci_hi3798cv200_caps,
- .num_caps = ARRAY_SIZE(dw_mci_hi3798cv200_caps),
+ .common_caps = MMC_CAP_CMD23,
.init = dw_mci_hi3798cv200_init,
.set_ios = dw_mci_hi3798cv200_set_ios,
.execute_tuning = dw_mci_hi3798cv200_execute_tuning,
@@ -178,7 +170,7 @@ static int dw_mci_hi3798cv200_probe(struct platform_device *pdev)
return dw_mci_pltfm_register(pdev, &hi3798cv200_data);
}
-static int dw_mci_hi3798cv200_remove(struct platform_device *pdev)
+static void dw_mci_hi3798cv200_remove(struct platform_device *pdev)
{
struct dw_mci *host = platform_get_drvdata(pdev);
struct hi3798cv200_priv *priv = host->priv;
@@ -186,7 +178,7 @@ static int dw_mci_hi3798cv200_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->drive_clk);
clk_disable_unprepare(priv->sample_clk);
- return dw_mci_pltfm_remove(pdev);
+ dw_mci_pltfm_remove(pdev);
}
static const struct of_device_id dw_mci_hi3798cv200_match[] = {
@@ -208,4 +200,3 @@ module_platform_driver(dw_mci_hi3798cv200_driver);
MODULE_DESCRIPTION("HiSilicon Hi3798CV200 Specific DW-MSHC Driver Extension");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:dwmmc_hi3798cv200");
diff --git a/drivers/mmc/host/dw_mmc-hi3798mv200.c b/drivers/mmc/host/dw_mmc-hi3798mv200.c
new file mode 100644
index 000000000000..5791a975a944
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc-hi3798mv200.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Modified from dw_mmc-hi3798cv200.c
+ *
+ * Copyright (c) 2024 Yang Xiwen <forbidden405@outlook.com>
+ * Copyright (c) 2018 HiSilicon Technologies Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
+
+#define SDMMC_TUNING_CTRL 0x118
+#define SDMMC_TUNING_FIND_EDGE BIT(5)
+
+#define ALL_INT_CLR 0x1ffff
+
+/* DLL ctrl reg */
+#define SAP_DLL_CTRL_DLLMODE BIT(16)
+
+struct dw_mci_hi3798mv200_priv {
+ struct clk *sample_clk;
+ struct clk *drive_clk;
+ struct regmap *crg_reg;
+ u32 sap_dll_offset;
+ struct mmc_clk_phase_map phase_map;
+};
+
+static void dw_mci_hi3798mv200_set_ios(struct dw_mci *host, struct mmc_ios *ios)
+{
+ struct dw_mci_hi3798mv200_priv *priv = host->priv;
+ struct mmc_clk_phase phase = priv->phase_map.phase[ios->timing];
+ u32 val;
+
+ val = mci_readl(host, ENABLE_SHIFT);
+ if (ios->timing == MMC_TIMING_MMC_DDR52
+ || ios->timing == MMC_TIMING_UHS_DDR50)
+ val |= SDMMC_ENABLE_PHASE;
+ else
+ val &= ~SDMMC_ENABLE_PHASE;
+ mci_writel(host, ENABLE_SHIFT, val);
+
+ val = mci_readl(host, DDR_REG);
+ if (ios->timing == MMC_TIMING_MMC_HS400)
+ val |= SDMMC_DDR_HS400;
+ else
+ val &= ~SDMMC_DDR_HS400;
+ mci_writel(host, DDR_REG, val);
+
+ if (clk_set_rate(host->ciu_clk, ios->clock))
+ dev_warn(host->dev, "Failed to set rate to %u\n", ios->clock);
+ else
+ /*
+ * CLK_MUX_ROUND_NEAREST is enabled for this clock, so the actual
+ * rate may be a rounded value rather than the one requested; read
+ * the rate back to get the value that was really set.
+ */
+ host->bus_hz = clk_get_rate(host->ciu_clk);
+
+ if (phase.valid) {
+ clk_set_phase(priv->drive_clk, phase.out_deg);
+ clk_set_phase(priv->sample_clk, phase.in_deg);
+ } else {
+ dev_warn(host->dev,
+ "The phase entry for timing mode %d is missing in device tree.\n",
+ ios->timing);
+ }
+}
+
+static inline int dw_mci_hi3798mv200_enable_tuning(struct dw_mci_slot *slot)
+{
+ struct dw_mci_hi3798mv200_priv *priv = slot->host->priv;
+
+ return regmap_clear_bits(priv->crg_reg, priv->sap_dll_offset, SAP_DLL_CTRL_DLLMODE);
+}
+
+static inline int dw_mci_hi3798mv200_disable_tuning(struct dw_mci_slot *slot)
+{
+ struct dw_mci_hi3798mv200_priv *priv = slot->host->priv;
+
+ return regmap_set_bits(priv->crg_reg, priv->sap_dll_offset, SAP_DLL_CTRL_DLLMODE);
+}
+
+static int dw_mci_hi3798mv200_execute_tuning_mix_mode(struct dw_mci_slot *slot,
+ u32 opcode)
+{
+ static const int degrees[] = { 0, 45, 90, 135, 180, 225, 270, 315 };
+ struct dw_mci *host = slot->host;
+ struct dw_mci_hi3798mv200_priv *priv = host->priv;
+ int raise_point = -1, fall_point = -1, mid;
+ int err, prev_err = -1;
+ int found = 0;
+ int regval;
+ int i;
+ int ret;
+
+ ret = dw_mci_hi3798mv200_enable_tuning(slot);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(degrees); i++) {
+ clk_set_phase(priv->sample_clk, degrees[i]);
+ mci_writel(host, RINTSTS, ALL_INT_CLR);
+
+ /*
+ * HiSilicon implements its own tuning mechanism that requires
+ * special interaction with the DLL.
+ *
+ * Treat a detected edge (flip) as an error as well.
+ */
+ err = mmc_send_tuning(slot->mmc, opcode, NULL);
+ regval = mci_readl(host, TUNING_CTRL);
+ if (err || (regval & SDMMC_TUNING_FIND_EDGE))
+ err = 1;
+ else
+ found = 1;
+
+ if (i > 0) {
+ if (err && !prev_err)
+ fall_point = i - 1;
+ if (!err && prev_err)
+ raise_point = i;
+ }
+
+ if (raise_point != -1 && fall_point != -1)
+ goto tuning_out;
+
+ prev_err = err;
+ }
+
+tuning_out:
+ ret = dw_mci_hi3798mv200_disable_tuning(slot);
+ if (ret < 0)
+ return ret;
+
+ if (found) {
+ if (raise_point == -1)
+ raise_point = 0;
+ if (fall_point == -1)
+ fall_point = ARRAY_SIZE(degrees) - 1;
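+ /*
+ * If the failing edge precedes the rising edge, the passing
+ * window wraps around the end of the phase list; pick the
+ * midpoint of whichever part of the window is longer.
+ */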
+ if (fall_point < raise_point) {
+ if ((raise_point + fall_point) >
+ (ARRAY_SIZE(degrees) - 1))
+ mid = fall_point / 2;
+ else
+ mid = (raise_point + ARRAY_SIZE(degrees) - 1) / 2;
+ } else {
+ mid = (raise_point + fall_point) / 2;
+ }
+
+ /*
+ * We don't care which timing mode we are tuning for; simply use
+ * the same phase for every timing mode that needs tuning.
+ */
+ priv->phase_map.phase[MMC_TIMING_MMC_HS200].in_deg = degrees[mid];
+ priv->phase_map.phase[MMC_TIMING_MMC_HS400].in_deg = degrees[mid];
+ priv->phase_map.phase[MMC_TIMING_UHS_SDR104].in_deg = degrees[mid];
+
+ clk_set_phase(priv->sample_clk, degrees[mid]);
+ dev_dbg(host->dev, "Tuning clk_sample[%d, %d], set[%d]\n",
+ raise_point, fall_point, degrees[mid]);
+ ret = 0;
+ } else {
+ dev_err(host->dev, "No valid clk_sample shift!\n");
+ ret = -EINVAL;
+ }
+
+ mci_writel(host, RINTSTS, ALL_INT_CLR);
+
+ return ret;
+}
+
+static int dw_mci_hi3798mv200_init(struct dw_mci *host)
+{
+ struct dw_mci_hi3798mv200_priv *priv;
+ struct device_node *np = host->dev->of_node;
+
+ priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mmc_of_parse_clk_phase(host->dev, &priv->phase_map);
+
+ priv->sample_clk = devm_clk_get_enabled(host->dev, "ciu-sample");
+ if (IS_ERR(priv->sample_clk))
+ return dev_err_probe(host->dev, PTR_ERR(priv->sample_clk),
+ "failed to get enabled ciu-sample clock\n");
+
+ priv->drive_clk = devm_clk_get_enabled(host->dev, "ciu-drive");
+ if (IS_ERR(priv->drive_clk))
+ return dev_err_probe(host->dev, PTR_ERR(priv->drive_clk),
+ "failed to get enabled ciu-drive clock\n");
+
+ priv->crg_reg = syscon_regmap_lookup_by_phandle_args(np, "hisilicon,sap-dll-reg",
+ 1, &priv->sap_dll_offset);
+ if (IS_ERR(priv->crg_reg))
+ return dev_err_probe(host->dev, PTR_ERR(priv->crg_reg),
+ "failed to get CRG reg\n");
+
+ host->priv = priv;
+ return 0;
+}
+
+static const struct dw_mci_drv_data hi3798mv200_data = {
+ .common_caps = MMC_CAP_CMD23,
+ .init = dw_mci_hi3798mv200_init,
+ .set_ios = dw_mci_hi3798mv200_set_ios,
+ .execute_tuning = dw_mci_hi3798mv200_execute_tuning_mix_mode,
+};
+
+static const struct of_device_id dw_mci_hi3798mv200_match[] = {
+ { .compatible = "hisilicon,hi3798mv200-dw-mshc" },
+ {},
+};
+
+static int dw_mci_hi3798mv200_probe(struct platform_device *pdev)
+{
+ return dw_mci_pltfm_register(pdev, &hi3798mv200_data);
+}
+
+static void dw_mci_hi3798mv200_remove(struct platform_device *pdev)
+{
+ dw_mci_pltfm_remove(pdev);
+}
+
+MODULE_DEVICE_TABLE(of, dw_mci_hi3798mv200_match);
+static struct platform_driver dw_mci_hi3798mv200_driver = {
+ .probe = dw_mci_hi3798mv200_probe,
+ .remove = dw_mci_hi3798mv200_remove,
+ .driver = {
+ .name = "dwmmc_hi3798mv200",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = dw_mci_hi3798mv200_match,
+ },
+};
+module_platform_driver(dw_mci_hi3798mv200_driver);
+
+MODULE_DESCRIPTION("HiSilicon Hi3798MV200 Specific DW-MSHC Driver Extension");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 0311a37dd4ab..ad6aa1aea549 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -461,11 +461,8 @@ static int dw_mci_k3_probe(struct platform_device *pdev)
}
static const struct dev_pm_ops dw_mci_k3_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
- dw_mci_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(dw_mci_runtime_suspend, dw_mci_runtime_resume, NULL)
};
static struct platform_driver dw_mci_k3_pltfm_driver = {
@@ -475,7 +472,7 @@ static struct platform_driver dw_mci_k3_pltfm_driver = {
.name = "dwmmc_k3",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_k3_match,
- .pm = &dw_mci_k3_dev_pm_ops,
+ .pm = pm_ptr(&dw_mci_k3_dev_pm_ops),
},
};
diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c
index e7ab699f488e..092cc99175af 100644
--- a/drivers/mmc/host/dw_mmc-pci.c
+++ b/drivers/mmc/host/dw_mmc-pci.c
@@ -75,11 +75,8 @@ static void dw_mci_pci_remove(struct pci_dev *pdev)
}
static const struct dev_pm_ops dw_mci_pci_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
- dw_mci_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(dw_mci_runtime_suspend, dw_mci_runtime_resume, NULL)
};
static const struct pci_device_id dw_mci_pci_id[] = {
@@ -94,7 +91,7 @@ static struct pci_driver dw_mci_pci_driver = {
.probe = dw_mci_pci_probe,
.remove = dw_mci_pci_remove,
.driver = {
- .pm = &dw_mci_pci_dev_pm_ops,
+ .pm = pm_ptr(&dw_mci_pci_dev_pm_ops),
},
};
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 9901208be797..de820ffd2133 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -17,10 +17,16 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
+#include <linux/mfd/altera-sysmgr.h>
+#include <linux/regmap.h>
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
+#define SOCFPGA_DW_MMC_CLK_PHASE_STEP 45
+#define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel, reg_shift) \
+ ((((smplsel) & 0x7) << reg_shift) | (((drvsel) & 0x7) << 0))
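+/*
+ * smplsel and drvsel are 3-bit clock phase selects (one step per 45
+ * degrees); drvsel lives at bit 0 and smplsel at the SoC-specific
+ * reg_shift within the system manager SDMMC control register.
+ */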
+
int dw_mci_pltfm_register(struct platform_device *pdev,
const struct dw_mci_drv_data *drv_data)
{
@@ -40,8 +46,7 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
host->irq_flags = 0;
host->pdata = pdev->dev.platform_data;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->regs = devm_ioremap_resource(&pdev->dev, regs);
+ host->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
if (IS_ERR(host->regs))
return PTR_ERR(host->regs);
@@ -62,9 +67,42 @@ const struct dev_pm_ops dw_mci_pltfm_pmops = {
};
EXPORT_SYMBOL_GPL(dw_mci_pltfm_pmops);
+static int dw_mci_socfpga_priv_init(struct dw_mci *host)
+{
+ struct device_node *np = host->dev->of_node;
+ struct regmap *sys_mgr_base_addr;
+ u32 clk_phase[2] = {0}, reg_offset, reg_shift;
+ int i, rc, hs_timing;
+
+ rc = of_property_read_variable_u32_array(np, "clk-phase-sd-hs", &clk_phase[0], 2, 0);
+ if (rc < 0)
+ return 0;
+
+ sys_mgr_base_addr = altr_sysmgr_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
+ if (IS_ERR(sys_mgr_base_addr)) {
+ dev_warn(host->dev, "clk-phase-sd-hs was specified, but failed to find altr,sys-mgr regmap!\n");
+ return 0;
+ }
+
+ of_property_read_u32_index(np, "altr,sysmgr-syscon", 1, &reg_offset);
+ of_property_read_u32_index(np, "altr,sysmgr-syscon", 2, &reg_shift);
+
+ for (i = 0; i < ARRAY_SIZE(clk_phase); i++)
+ clk_phase[i] /= SOCFPGA_DW_MMC_CLK_PHASE_STEP;
+
+ hs_timing = SYSMGR_SDMMC_CTRL_SET(clk_phase[0], clk_phase[1], reg_shift);
+ regmap_write(sys_mgr_base_addr, reg_offset, hs_timing);
+
+ return 0;
+}
+
+static const struct dw_mci_drv_data socfpga_drv_data = {
+ .init = dw_mci_socfpga_priv_init,
+};
+
static const struct of_device_id dw_mci_pltfm_match[] = {
{ .compatible = "snps,dw-mshc", },
- { .compatible = "altr,socfpga-dw-mshc", },
+ { .compatible = "altr,socfpga-dw-mshc", .data = &socfpga_drv_data, },
{ .compatible = "img,pistachio-dw-mshc", },
{},
};
@@ -83,12 +121,11 @@ static int dw_mci_pltfm_probe(struct platform_device *pdev)
return dw_mci_pltfm_register(pdev, drv_data);
}
-int dw_mci_pltfm_remove(struct platform_device *pdev)
+void dw_mci_pltfm_remove(struct platform_device *pdev)
{
struct dw_mci *host = platform_get_drvdata(pdev);
dw_mci_remove(host);
- return 0;
}
EXPORT_SYMBOL_GPL(dw_mci_pltfm_remove);
diff --git a/drivers/mmc/host/dw_mmc-pltfm.h b/drivers/mmc/host/dw_mmc-pltfm.h
index 2d50d7da2e36..64cf7522a3d4 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.h
+++ b/drivers/mmc/host/dw_mmc-pltfm.h
@@ -10,7 +10,7 @@
extern int dw_mci_pltfm_register(struct platform_device *pdev,
const struct dw_mci_drv_data *drv_data);
-extern int dw_mci_pltfm_remove(struct platform_device *pdev);
+extern void dw_mci_pltfm_remove(struct platform_device *pdev);
extern const struct dev_pm_ops dw_mci_pltfm_pmops;
#endif /* _DW_MMC_PLTFM_H_ */
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index d36991acd6df..62c68cda1e21 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/hw_bitfield.h>
#include <linux/mmc/host.h>
#include <linux/of_address.h>
#include <linux/mmc/slot-gpio.h>
@@ -15,15 +16,164 @@
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
-#define RK3288_CLKGEN_DIV 2
+#define RK3288_CLKGEN_DIV 2
+#define SDMMC_TIMING_CON0 0x130
+#define SDMMC_TIMING_CON1 0x134
+#define SDMMC_MISC_CON 0x138
+#define MEM_CLK_AUTOGATE_ENABLE BIT(5)
+#define ROCKCHIP_MMC_DELAY_SEL BIT(10)
+#define ROCKCHIP_MMC_DEGREE_MASK 0x3
+#define ROCKCHIP_MMC_DEGREE_OFFSET 1
+#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
+#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
+#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60
+
+static const unsigned int freqs[] = { 100000, 200000, 300000, 400000 };
struct dw_mci_rockchip_priv_data {
struct clk *drv_clk;
struct clk *sample_clk;
int default_sample_phase;
int num_phases;
+ bool internal_phase;
};
+/*
+ * Each fine delay is between 44ps-77ps. Assume each fine delay is 60ps to
+ * simplify calculations. So 45degs could be anywhere between 33deg and 57.8deg.
+ */
+static int rockchip_mmc_get_internal_phase(struct dw_mci *host, bool sample)
+{
+ unsigned long rate = clk_get_rate(host->ciu_clk) / RK3288_CLKGEN_DIV;
+ u32 raw_value;
+ u16 degrees;
+ u32 delay_num = 0;
+
+ /* Constant signal, no measurable phase shift */
+ if (!rate)
+ return 0;
+
+ if (sample)
+ raw_value = mci_readl(host, TIMING_CON1);
+ else
+ raw_value = mci_readl(host, TIMING_CON0);
+
+ raw_value >>= ROCKCHIP_MMC_DEGREE_OFFSET;
+ degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
+
+ if (raw_value & ROCKCHIP_MMC_DELAY_SEL) {
+ /* degrees/delaynum * 1000000 */
+ unsigned long factor = (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10) *
+ 36 * (rate / 10000);
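+ /*
+ * factor above is the phase contributed by one ~60ps delay
+ * element (60ps * 360deg * rate) scaled by 1e6; the division
+ * by 1000000 below recovers whole degrees.
+ */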
+
+ delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
+ delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
+ degrees += DIV_ROUND_CLOSEST(delay_num * factor, 1000000);
+ }
+
+ return degrees % 360;
+}
+
+static int rockchip_mmc_get_phase(struct dw_mci *host, bool sample)
+{
+ struct dw_mci_rockchip_priv_data *priv = host->priv;
+ struct clk *clock = sample ? priv->sample_clk : priv->drv_clk;
+
+ if (priv->internal_phase)
+ return rockchip_mmc_get_internal_phase(host, sample);
+ else
+ return clk_get_phase(clock);
+}
+
+static int rockchip_mmc_set_internal_phase(struct dw_mci *host, bool sample, int degrees)
+{
+ unsigned long rate = clk_get_rate(host->ciu_clk) / RK3288_CLKGEN_DIV;
+ u8 nineties, remainder;
+ u8 delay_num;
+ u32 raw_value;
+ u32 delay;
+
+ /*
+ * The below calculation is based on the output clock from
+ * MMC host to the card, which expects the phase clock inherits
+ * the clock rate from its parent, namely the output clock
+ * provider of MMC host. However, things may go wrong if
+ * (1) It is orphan.
+ * (2) It is assigned to the wrong parent.
+ *
+ * This check helps debug case (1), which seems to be the
+ * most likely problem we often face and which makes it difficult
+ * for people to debug unstable mmc tuning results.
+ */
+ if (!rate) {
+ dev_err(host->dev, "%s: invalid clk rate\n", __func__);
+ return -EINVAL;
+ }
+
+ nineties = degrees / 90;
+ remainder = (degrees % 90);
+
+ /*
+ * Due to the inexact nature of the "fine" delay, we might
+ * actually go non-monotonic. We don't go _too_ non-monotonic
+ * though, so we should be OK. Here are options of how we may
+ * work:
+ *
+ * Ideally we end up with:
+ * 1.0, 2.0, ..., 69.0, 70.0, ..., 89.0, 90.0
+ *
+ * On one extreme (if delay is actually 44ps):
+ * .73, 1.5, ..., 50.6, 51.3, ..., 65.3, 90.0
+ * The other (if delay is actually 77ps):
+ * 1.3, 2.6, ..., 88.6, 89.8, ..., 114.0, 90
+ *
+ * It's possible we might make a delay that is up to 25
+ * degrees off from what we think we're making. That's OK
+ * though because we should be REALLY far from any bad range.
+ */
+
+ /*
+ * Convert to delay; do a little extra work to make sure we
+ * don't overflow 32-bit / 64-bit numbers.
+ */
+ delay = 10000000; /* PSECS_PER_SEC / 10000 / 10 */
+ delay *= remainder;
+ delay = DIV_ROUND_CLOSEST(delay,
+ (rate / 1000) * 36 *
+ (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10));
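+ /* e.g. a 45 degree remainder at rate = 150 MHz gives roughly 14 delay elements */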
+
+ delay_num = (u8) min_t(u32, delay, 255);
+
+ raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
+ raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
+ raw_value |= nineties;
+
+ if (sample)
+ mci_writel(host, TIMING_CON1,
+ FIELD_PREP_WM16(GENMASK(11, 1), raw_value));
+ else
+ mci_writel(host, TIMING_CON0,
+ FIELD_PREP_WM16(GENMASK(11, 1), raw_value));
+
+ dev_dbg(host->dev, "set %s_phase(%d) delay_nums=%u actual_degrees=%d\n",
+ sample ? "sample" : "drv", degrees, delay_num,
+ rockchip_mmc_get_phase(host, sample)
+ );
+
+ return 0;
+}
+
+static int rockchip_mmc_set_phase(struct dw_mci *host, bool sample, int degrees)
+{
+ struct dw_mci_rockchip_priv_data *priv = host->priv;
+ struct clk *clock = sample ? priv->sample_clk : priv->drv_clk;
+
+ if (priv->internal_phase)
+ return rockchip_mmc_set_internal_phase(host, sample, degrees);
+ else
+ return clk_set_phase(clock, degrees);
+}
+
static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
struct dw_mci_rockchip_priv_data *priv = host->priv;
@@ -51,7 +201,7 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
ret = clk_set_rate(host->ciu_clk, cclkin);
if (ret)
- dev_warn(host->dev, "failed to set rate %uHz\n", ios->clock);
+ dev_warn(host->dev, "failed to set rate %uHz err: %d\n", cclkin, ret);
bus_hz = clk_get_rate(host->ciu_clk) / RK3288_CLKGEN_DIV;
if (bus_hz != host->bus_hz) {
@@ -62,7 +212,7 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
/* Make sure we use phases which we can enumerate with */
if (!IS_ERR(priv->sample_clk) && ios->timing <= MMC_TIMING_SD_HS)
- clk_set_phase(priv->sample_clk, priv->default_sample_phase);
+ rockchip_mmc_set_phase(host, true, priv->default_sample_phase);
/*
* Set the drive phase offset based on speed mode to achieve hold times.
@@ -125,7 +275,7 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
break;
}
- clk_set_phase(priv->drv_clk, phase);
+ rockchip_mmc_set_phase(host, false, phase);
}
}
@@ -149,6 +299,7 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
int longest_range_len = -1;
int longest_range = -1;
int middle_phase;
+ int phase;
if (IS_ERR(priv->sample_clk)) {
dev_err(host->dev, "Tuning clock (sample_clk) not defined.\n");
@@ -162,8 +313,10 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
/* Try each phase and extract good ranges */
for (i = 0; i < priv->num_phases; ) {
- clk_set_phase(priv->sample_clk,
- TUNING_ITERATION_TO_PHASE(i, priv->num_phases));
+ rockchip_mmc_set_phase(host, true,
+ TUNING_ITERATION_TO_PHASE(
+ i,
+ priv->num_phases));
v = !mmc_send_tuning(mmc, opcode, NULL);
@@ -209,7 +362,8 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
}
if (ranges[0].start == 0 && ranges[0].end == priv->num_phases - 1) {
- clk_set_phase(priv->sample_clk, priv->default_sample_phase);
+ rockchip_mmc_set_phase(host, true, priv->default_sample_phase);
+
dev_info(host->dev, "All phases work, using default phase %d.",
priv->default_sample_phase);
goto free;
@@ -246,19 +400,17 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
middle_phase = ranges[longest_range].start + longest_range_len / 2;
middle_phase %= priv->num_phases;
- dev_info(host->dev, "Successfully tuned phase to %d\n",
- TUNING_ITERATION_TO_PHASE(middle_phase, priv->num_phases));
+ phase = TUNING_ITERATION_TO_PHASE(middle_phase, priv->num_phases);
+ dev_info(host->dev, "Successfully tuned phase to %d\n", phase);
- clk_set_phase(priv->sample_clk,
- TUNING_ITERATION_TO_PHASE(middle_phase,
- priv->num_phases));
+ rockchip_mmc_set_phase(host, true, phase);
free:
kfree(ranges);
return ret;
}
-static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
+static int dw_mci_common_parse_dt(struct dw_mci *host)
{
struct device_node *np = host->dev->of_node;
struct dw_mci_rockchip_priv_data *priv;
@@ -268,13 +420,29 @@ static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
return -ENOMEM;
if (of_property_read_u32(np, "rockchip,desired-num-phases",
- &priv->num_phases))
+ &priv->num_phases))
priv->num_phases = 360;
if (of_property_read_u32(np, "rockchip,default-sample-phase",
- &priv->default_sample_phase))
+ &priv->default_sample_phase))
priv->default_sample_phase = 0;
+ host->priv = priv;
+
+ return 0;
+}
+
+static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
+{
+ struct dw_mci_rockchip_priv_data *priv;
+ int err;
+
+ err = dw_mci_common_parse_dt(host);
+ if (err)
+ return err;
+
+ priv = host->priv;
+
priv->drv_clk = devm_clk_get(host->dev, "ciu-drive");
if (IS_ERR(priv->drv_clk))
dev_dbg(host->dev, "ciu-drive not available\n");
@@ -283,49 +451,85 @@ static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
if (IS_ERR(priv->sample_clk))
dev_dbg(host->dev, "ciu-sample not available\n");
- host->priv = priv;
+ priv->internal_phase = false;
+
+ return 0;
+}
+
+static int dw_mci_rk3576_parse_dt(struct dw_mci *host)
+{
+ struct dw_mci_rockchip_priv_data *priv;
+ int err;
+
+ err = dw_mci_common_parse_dt(host);
+ if (err)
+ return err;
+
+ priv = host->priv;
+
+ priv->internal_phase = true;
return 0;
}
static int dw_mci_rockchip_init(struct dw_mci *host)
{
+ struct dw_mci_rockchip_priv_data *priv = host->priv;
+ int ret, i;
+
/* It is slot 8 on Rockchip SoCs */
host->sdio_id0 = 8;
- if (of_device_is_compatible(host->dev->of_node,
- "rockchip,rk3288-dw-mshc"))
+ if (of_device_is_compatible(host->dev->of_node, "rockchip,rk3288-dw-mshc")) {
host->bus_hz /= RK3288_CLKGEN_DIV;
+ /*
+ * The clock driver will fail if the clock is less than the lowest
+ * source clock divided by the internal clock divider. Test for the
+ * lowest available clock and set the minimum freq to clock / divider.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+ ret = clk_round_rate(host->ciu_clk, freqs[i] * RK3288_CLKGEN_DIV);
+ if (ret > 0) {
+ host->minimum_speed = ret / RK3288_CLKGEN_DIV;
+ break;
+ }
+ }
+ if (ret < 0)
+ dev_warn(host->dev, "no valid minimum freq: %d\n", ret);
+ }
+
+ if (priv->internal_phase)
+ mci_writel(host, MISC_CON, MEM_CLK_AUTOGATE_ENABLE);
+
return 0;
}
-/* Common capabilities of RK3288 SoC */
-static unsigned long dw_mci_rk3288_dwmmc_caps[4] = {
- MMC_CAP_CMD23,
- MMC_CAP_CMD23,
- MMC_CAP_CMD23,
- MMC_CAP_CMD23,
-};
-
static const struct dw_mci_drv_data rk2928_drv_data = {
.init = dw_mci_rockchip_init,
};
static const struct dw_mci_drv_data rk3288_drv_data = {
- .caps = dw_mci_rk3288_dwmmc_caps,
- .num_caps = ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps),
+ .common_caps = MMC_CAP_CMD23,
.set_ios = dw_mci_rk3288_set_ios,
.execute_tuning = dw_mci_rk3288_execute_tuning,
.parse_dt = dw_mci_rk3288_parse_dt,
.init = dw_mci_rockchip_init,
};
+static const struct dw_mci_drv_data rk3576_drv_data = {
+ .common_caps = MMC_CAP_CMD23,
+ .set_ios = dw_mci_rk3288_set_ios,
+ .execute_tuning = dw_mci_rk3288_execute_tuning,
+ .parse_dt = dw_mci_rk3576_parse_dt,
+ .init = dw_mci_rockchip_init,
+};
+
static const struct of_device_id dw_mci_rockchip_match[] = {
{ .compatible = "rockchip,rk2928-dw-mshc",
.data = &rk2928_drv_data },
{ .compatible = "rockchip,rk3288-dw-mshc",
.data = &rk3288_drv_data },
+ { .compatible = "rockchip,rk3576-dw-mshc",
+ .data = &rk3576_drv_data },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_rockchip_match);
@@ -361,21 +565,18 @@ static int dw_mci_rockchip_probe(struct platform_device *pdev)
return 0;
}
-static int dw_mci_rockchip_remove(struct platform_device *pdev)
+static void dw_mci_rockchip_remove(struct platform_device *pdev)
{
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- return dw_mci_pltfm_remove(pdev);
+ dw_mci_pltfm_remove(pdev);
}
static const struct dev_pm_ops dw_mci_rockchip_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
- dw_mci_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(dw_mci_runtime_suspend, dw_mci_runtime_resume, NULL)
};
static struct platform_driver dw_mci_rockchip_pltfm_driver = {
@@ -385,7 +586,7 @@ static struct platform_driver dw_mci_rockchip_pltfm_driver = {
.name = "dwmmc_rockchip",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_rockchip_match,
- .pm = &dw_mci_rockchip_dev_pm_ops,
+ .pm = pm_ptr(&dw_mci_rockchip_dev_pm_ops),
},
};
diff --git a/drivers/mmc/host/dw_mmc-starfive.c b/drivers/mmc/host/dw_mmc-starfive.c
new file mode 100644
index 000000000000..34964b0dab21
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc-starfive.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * StarFive Designware Mobile Storage Host Controller Driver
+ *
+ * Copyright (c) 2022 StarFive Technology Co., Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
+
+#define ALL_INT_CLR 0x1ffff
+#define MAX_DELAY_CHAIN 32
+
+#define STARFIVE_SMPL_PHASE GENMASK(20, 16)
+
+static void dw_mci_starfive_set_ios(struct dw_mci *host, struct mmc_ios *ios)
+{
+ int ret;
+ unsigned int clock;
+
+ if (ios->timing == MMC_TIMING_MMC_DDR52 || ios->timing == MMC_TIMING_UHS_DDR50) {
+ clock = (ios->clock > 50000000 && ios->clock <= 52000000) ? 100000000 : ios->clock;
+ ret = clk_set_rate(host->ciu_clk, clock);
+ if (ret)
+ dev_dbg(host->dev, "Use an external frequency divider %uHz\n", ios->clock);
+ host->bus_hz = clk_get_rate(host->ciu_clk);
+ } else {
+ dev_dbg(host->dev, "Using the internal divider\n");
+ }
+}
+
+static void dw_mci_starfive_set_sample_phase(struct dw_mci *host, u32 smpl_phase)
+{
+ /* change driver phase and sample phase */
+ u32 reg_value = mci_readl(host, UHS_REG_EXT);
+
+ /* In UHS_REG_EXT, only 5 bits valid in DRV_PHASE and SMPL_PHASE */
+ reg_value &= ~STARFIVE_SMPL_PHASE;
+ reg_value |= FIELD_PREP(STARFIVE_SMPL_PHASE, smpl_phase);
+ mci_writel(host, UHS_REG_EXT, reg_value);
+
+ /* We should delay 1ms wait for timing setting finished. */
+ mdelay(1);
+}
+
+static int dw_mci_starfive_execute_tuning(struct dw_mci_slot *slot,
+ u32 opcode)
+{
+ static const int grade = MAX_DELAY_CHAIN;
+ struct dw_mci *host = slot->host;
+ int smpl_phase, smpl_raise = -1, smpl_fall = -1;
+ int ret;
+
+ for (smpl_phase = 0; smpl_phase < grade; smpl_phase++) {
+ dw_mci_starfive_set_sample_phase(host, smpl_phase);
+ mci_writel(host, RINTSTS, ALL_INT_CLR);
+
+ ret = mmc_send_tuning(slot->mmc, opcode, NULL);
+
+ if (!ret && smpl_raise < 0) {
+ smpl_raise = smpl_phase;
+ } else if (ret && smpl_raise >= 0) {
+ smpl_fall = smpl_phase - 1;
+ break;
+ }
+ }
+
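+ /* The loop ran to the end without a failing phase: close the window at the last phase. */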
+ if (smpl_phase >= grade)
+ smpl_fall = grade - 1;
+
+ if (smpl_raise < 0) {
+ smpl_phase = 0;
+ dev_err(host->dev, "No valid delay chain! use default\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ smpl_phase = (smpl_raise + smpl_fall) / 2;
+ dev_dbg(host->dev, "Found valid delay chain! use it [delay=%d]\n", smpl_phase);
+ ret = 0;
+
+out:
+ dw_mci_starfive_set_sample_phase(host, smpl_phase);
+ mci_writel(host, RINTSTS, ALL_INT_CLR);
+ return ret;
+}
+
+static const struct dw_mci_drv_data starfive_data = {
+ .common_caps = MMC_CAP_CMD23,
+ .set_ios = dw_mci_starfive_set_ios,
+ .execute_tuning = dw_mci_starfive_execute_tuning,
+};
+
+static const struct of_device_id dw_mci_starfive_match[] = {
+ { .compatible = "starfive,jh7110-mmc",
+ .data = &starfive_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_mci_starfive_match);
+
+static int dw_mci_starfive_probe(struct platform_device *pdev)
+{
+ return dw_mci_pltfm_register(pdev, &starfive_data);
+}
+
+static struct platform_driver dw_mci_starfive_driver = {
+ .probe = dw_mci_starfive_probe,
+ .remove = dw_mci_pltfm_remove,
+ .driver = {
+ .name = "dwmmc_starfive",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = dw_mci_starfive_match,
+ },
+};
+module_platform_driver(dw_mci_starfive_driver);
+
+MODULE_DESCRIPTION("StarFive JH7110 Specific DW-MSHC Driver Extension");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:dwmmc_starfive");
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index d333130d1531..9e74b675e92d 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -17,9 +17,11 @@
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
+#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/prandom.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
@@ -33,7 +35,6 @@
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>
#include "dw_mmc.h"
@@ -174,13 +175,16 @@ static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
if (!root)
return;
- debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
- debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
- debugfs_create_u32("state", S_IRUSR, root, &host->state);
- debugfs_create_xul("pending_events", S_IRUSR, root,
+ debugfs_create_file("regs", 0400, root, host, &dw_mci_regs_fops);
+ debugfs_create_file("req", 0400, root, slot, &dw_mci_req_fops);
+ debugfs_create_u32("state", 0400, root, &host->state);
+ debugfs_create_xul("pending_events", 0400, root,
&host->pending_events);
- debugfs_create_xul("completed_events", S_IRUSR, root,
+ debugfs_create_xul("completed_events", 0400, root,
&host->completed_events);
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc);
+#endif
}
#endif /* defined(CONFIG_DEBUG_FS) */
@@ -329,8 +333,8 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
cmdr == MMC_READ_MULTIPLE_BLOCK ||
cmdr == MMC_WRITE_BLOCK ||
cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
- cmdr == MMC_SEND_TUNING_BLOCK ||
- cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
+ mmc_op_tuning(cmdr) ||
+ cmdr == MMC_GEN_CMD) {
stop->opcode = MMC_STOP_TRANSMISSION;
stop->arg = 0;
stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
@@ -489,7 +493,7 @@ static void dw_mci_dmac_complete_dma(void *arg)
*/
if (data) {
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
}
@@ -782,6 +786,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
int ret = 0;
/* Set external dma config: burst size, burst width */
+ memset(&cfg, 0, sizeof(cfg));
cfg.dst_addr = host->phy_regs + fifo_offset;
cfg.src_addr = cfg.dst_addr;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -1177,7 +1182,7 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
/*
* Use the initial fifoth_val for PIO mode. If wm_aligned
* is set, we set watermark same as data size.
- * If next issued data may be transfered by DMA mode,
+ * If next issued data may be transferred by DMA mode,
* prev_blksz should be invalidated.
*/
if (host->wm_aligned)
@@ -1277,6 +1282,37 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
+static void dw_mci_set_data_timeout(struct dw_mci *host,
+ unsigned int timeout_ns)
+{
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
+ u32 clk_div, tmout;
+ u64 tmp;
+
+ if (drv_data && drv_data->set_data_timeout)
+ return drv_data->set_data_timeout(host, timeout_ns);
+
+ clk_div = (mci_readl(host, CLKDIV) & 0xFF) * 2;
+ if (clk_div == 0)
+ clk_div = 1;
+
+ tmp = DIV_ROUND_UP_ULL((u64)timeout_ns * host->bus_hz, NSEC_PER_SEC);
+ tmp = DIV_ROUND_UP_ULL(tmp, clk_div);
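+ /* tmp is now the data timeout expressed in card clock (bus_hz / clk_div) cycles */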
+
+ /* TMOUT[7:0] (RESPONSE_TIMEOUT) */
+ tmout = 0xFF; /* Set maximum */
+
+ /* TMOUT[31:8] (DATA_TIMEOUT) */
+ if (!tmp || tmp > 0xFFFFFF)
+ tmout |= (0xFFFFFF << 8);
+ else
+ tmout |= (tmp & 0xFFFFFF) << 8;
+
+ mci_writel(host, TMOUT, tmout);
+ dev_dbg(host->dev, "timeout_ns: %u => TMOUT[31:8]: %#08x",
+ timeout_ns, tmout >> 8);
+}
+
static void __dw_mci_start_request(struct dw_mci *host,
struct dw_mci_slot *slot,
struct mmc_command *cmd)
@@ -1297,7 +1333,7 @@ static void __dw_mci_start_request(struct dw_mci *host,
data = cmd->data;
if (data) {
- mci_writel(host, TMOUT, 0xFFFFFFFF);
+ dw_mci_set_data_timeout(host, data->timeout_ns);
mci_writel(host, BYTCNT, data->blksz*data->blocks);
mci_writel(host, BLKSIZ, data->blksz);
}
@@ -1325,7 +1361,7 @@ static void __dw_mci_start_request(struct dw_mci *host,
* is just about to roll over.
*
* We do this whole thing under spinlock and only if the
- * command hasn't already completed (indicating the the irq
+ * command hasn't already completed (indicating the irq
* already ran so we don't want the timeout).
*/
spin_lock_irqsave(&host->irq_lock, irqflags);
@@ -1581,6 +1617,7 @@ static void dw_mci_hw_reset(struct mmc_host *mmc)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
int reset;
if (host->use_dma == TRANS_MODE_IDMAC)
@@ -1590,6 +1627,11 @@ static void dw_mci_hw_reset(struct mmc_host *mmc)
SDMMC_CTRL_FIFO_RESET))
return;
+ if (drv_data && drv_data->hw_reset) {
+ drv_data->hw_reset(host);
+ return;
+ }
+
/*
* According to eMMC spec, card reset procedure:
* tRstW >= 1us: RST_n pulse width
@@ -1605,37 +1647,32 @@ static void dw_mci_hw_reset(struct mmc_host *mmc)
usleep_range(200, 300);
}
-static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
+static void dw_mci_prepare_sdio_irq(struct dw_mci_slot *slot, bool prepare)
{
- struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
+ const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
+ u32 clk_en_a_old;
+ u32 clk_en_a;
/*
* Low power mode will stop the card clock when idle. According to the
* description of the CLKENA register we should disable low power mode
* for SDIO cards if we need SDIO interrupts to work.
*/
- if (mmc->caps & MMC_CAP_SDIO_IRQ) {
- const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
- u32 clk_en_a_old;
- u32 clk_en_a;
-
- clk_en_a_old = mci_readl(host, CLKENA);
- if (card->type == MMC_TYPE_SDIO ||
- card->type == MMC_TYPE_SD_COMBO) {
- set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
- clk_en_a = clk_en_a_old & ~clken_low_pwr;
- } else {
- clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
- clk_en_a = clk_en_a_old | clken_low_pwr;
- }
+ clk_en_a_old = mci_readl(host, CLKENA);
+ if (prepare) {
+ set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+ clk_en_a = clk_en_a_old & ~clken_low_pwr;
+ } else {
+ clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+ clk_en_a = clk_en_a_old | clken_low_pwr;
+ }
- if (clk_en_a != clk_en_a_old) {
- mci_writel(host, CLKENA, clk_en_a);
- mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
- SDMMC_CMD_PRV_DAT_WAIT, 0);
- }
+ if (clk_en_a != clk_en_a_old) {
+ mci_writel(host, CLKENA, clk_en_a);
+ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT,
+ 0);
}
}
@@ -1663,6 +1700,7 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
+ dw_mci_prepare_sdio_irq(slot, enb);
__dw_mci_enable_sdio_irq(slot, enb);
/* Avoid runtime suspending the device when SDIO IRQ is enabled */
@@ -1778,16 +1816,81 @@ static const struct mmc_host_ops dw_mci_ops = {
.set_ios = dw_mci_set_ios,
.get_ro = dw_mci_get_ro,
.get_cd = dw_mci_get_cd,
- .hw_reset = dw_mci_hw_reset,
+ .card_hw_reset = dw_mci_hw_reset,
.enable_sdio_irq = dw_mci_enable_sdio_irq,
.ack_sdio_irq = dw_mci_ack_sdio_irq,
.execute_tuning = dw_mci_execute_tuning,
.card_busy = dw_mci_card_busy,
.start_signal_voltage_switch = dw_mci_switch_voltage,
- .init_card = dw_mci_init_card,
.prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
};
+#ifdef CONFIG_FAULT_INJECTION
+static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
+{
+ struct dw_mci *host = container_of(t, struct dw_mci, fault_timer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->irq_lock, flags);
+
+ /*
+ * Only inject an error if we haven't already got an error or data over
+ * interrupt.
+ */
+ if (!host->data_status) {
+ host->data_status = SDMMC_INT_DCRC;
+ set_bit(EVENT_DATA_ERROR, &host->pending_events);
+ queue_work(system_bh_wq, &host->bh_work);
+ }
+
+ spin_unlock_irqrestore(&host->irq_lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+static void dw_mci_start_fault_timer(struct dw_mci *host)
+{
+ struct mmc_data *data = host->data;
+
+ if (!data || data->blocks <= 1)
+ return;
+
+ if (!should_fail(&host->fail_data_crc, 1))
+ return;
+
+ /*
+ * Try to inject the error at random points during the data transfer.
+ */
+ hrtimer_start(&host->fault_timer,
+ ms_to_ktime(get_random_u32_below(25)),
+ HRTIMER_MODE_REL);
+}
+
+static void dw_mci_stop_fault_timer(struct dw_mci *host)
+{
+ hrtimer_cancel(&host->fault_timer);
+}
+
+static void dw_mci_init_fault(struct dw_mci *host)
+{
+ host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;
+
+ hrtimer_setup(&host->fault_timer, dw_mci_fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+}
+#else
+static void dw_mci_init_fault(struct dw_mci *host)
+{
+}
+
+static void dw_mci_start_fault_timer(struct dw_mci *host)
+{
+}
+
+static void dw_mci_stop_fault_timer(struct dw_mci *host)
+{
+}
+#endif
+
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
__releases(&host->lock)
__acquires(&host->lock)
@@ -1899,12 +2002,16 @@ static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
static void dw_mci_set_drto(struct dw_mci *host)
{
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
unsigned int drto_clks;
unsigned int drto_div;
unsigned int drto_ms;
unsigned long irqflags;
- drto_clks = mci_readl(host, TMOUT) >> 8;
+ if (drv_data && drv_data->get_drto_clks)
+ drto_clks = drv_data->get_drto_clks(host);
+ else
+ drto_clks = mci_readl(host, TMOUT) >> 8;
drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
if (drto_div == 0)
drto_div = 1;
@@ -1912,6 +2019,8 @@ static void dw_mci_set_drto(struct dw_mci *host)
drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
host->bus_hz);
+ dev_dbg(host->dev, "drto_ms: %u\n", drto_ms);
+
/* add a bit spare time */
drto_ms += 10;
@@ -1931,10 +2040,10 @@ static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
* Really be certain that the timer has stopped. This is a bit of
* paranoia and could only really happen if we had really bad
* interrupt latency and the interrupt routine and timeout were
- * running concurrently so that the del_timer() in the interrupt
+ * running concurrently so that the timer_delete() in the interrupt
* handler couldn't run.
*/
- WARN_ON(del_timer_sync(&host->cto_timer));
+ WARN_ON(timer_delete_sync(&host->cto_timer));
clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
return true;
@@ -1946,15 +2055,15 @@ static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
return false;
/* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
- WARN_ON(del_timer_sync(&host->dto_timer));
+ WARN_ON(timer_delete_sync(&host->dto_timer));
clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
return true;
}
-static void dw_mci_tasklet_func(struct tasklet_struct *t)
+static void dw_mci_work_func(struct work_struct *t)
{
- struct dw_mci *host = from_tasklet(host, t, tasklet);
+ struct dw_mci *host = from_work(host, t, bh_work);
struct mmc_data *data;
struct mmc_command *cmd;
struct mmc_request *mrq;
@@ -2009,17 +2118,18 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
* will waste a bit of time (we already know
* the command was bad), it can't cause any
* errors since it's possible it would have
- * taken place anyway if this tasklet got
+ * taken place anyway if this bh work got
* delayed. Allowing the transfer to take place
* avoids races and keeps things simple.
*/
- if (err != -ETIMEDOUT) {
+ if (err != -ETIMEDOUT &&
+ host->dir_status == DW_MCI_RECV_STATUS) {
state = STATE_SENDING_DATA;
continue;
}
- dw_mci_stop_dma(host);
send_stop_abort(host, data);
+ dw_mci_stop_dma(host);
state = STATE_SENDING_STOP;
break;
}
@@ -2043,10 +2153,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
*/
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
- dw_mci_stop_dma(host);
if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
+ dw_mci_stop_dma(host);
state = STATE_DATA_ERROR;
break;
}
@@ -2079,10 +2189,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
*/
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
- dw_mci_stop_dma(host);
if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
+ dw_mci_stop_dma(host);
state = STATE_DATA_ERROR;
break;
}
@@ -2102,6 +2212,7 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
break;
}
+ dw_mci_stop_fault_timer(host);
host->data = NULL;
set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
err = dw_mci_data_complete(host, data);
@@ -2151,6 +2262,7 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
if (mrq->cmd->error && mrq->data)
dw_mci_reset(host);
+ dw_mci_stop_fault_timer(host);
host->cmd = NULL;
host->data = NULL;
@@ -2466,6 +2578,91 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
}
}
+static void dw_mci_push_data64_32(struct dw_mci *host, void *buf, int cnt)
+{
+ struct mmc_data *data = host->data;
+ int init_cnt = cnt;
+
+ /* try and push anything in the part_buf */
+ if (unlikely(host->part_buf_count)) {
+ int len = dw_mci_push_part_bytes(host, buf, cnt);
+
+ buf += len;
+ cnt -= len;
+
+ if (host->part_buf_count == 8) {
+ mci_fifo_l_writeq(host->fifo_reg, host->part_buf);
+ host->part_buf_count = 0;
+ }
+ }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x7)) {
+ while (cnt >= 8) {
+ u64 aligned_buf[16];
+ int len = min(cnt & -8, (int)sizeof(aligned_buf));
+ int items = len >> 3;
+ int i;
+ /* memcpy from input buffer into aligned buffer */
+ memcpy(aligned_buf, buf, len);
+ buf += len;
+ cnt -= len;
+ /* push data from aligned buffer into fifo */
+ for (i = 0; i < items; ++i)
+ mci_fifo_l_writeq(host->fifo_reg, aligned_buf[i]);
+ }
+ } else
+#endif
+ {
+ u64 *pdata = buf;
+
+ for (; cnt >= 8; cnt -= 8)
+ mci_fifo_l_writeq(host->fifo_reg, *pdata++);
+ buf = pdata;
+ }
+ /* put anything remaining in the part_buf */
+ if (cnt) {
+ dw_mci_set_part_bytes(host, buf, cnt);
+ /* Push data if we have reached the expected data length */
+ if ((data->bytes_xfered + init_cnt) ==
+ (data->blksz * data->blocks))
+ mci_fifo_l_writeq(host->fifo_reg, host->part_buf);
+ }
+}
+
+static void dw_mci_pull_data64_32(struct dw_mci *host, void *buf, int cnt)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x7)) {
+ while (cnt >= 8) {
+ /* pull data from fifo into aligned buffer */
+ u64 aligned_buf[16];
+ int len = min(cnt & -8, (int)sizeof(aligned_buf));
+ int items = len >> 3;
+ int i;
+
+ for (i = 0; i < items; ++i)
+ aligned_buf[i] = mci_fifo_l_readq(host->fifo_reg);
+
+ /* memcpy from aligned buffer into output buffer */
+ memcpy(buf, aligned_buf, len);
+ buf += len;
+ cnt -= len;
+ }
+ } else
+#endif
+ {
+ u64 *pdata = buf;
+
+ for (; cnt >= 8; cnt -= 8)
+ *pdata++ = mci_fifo_l_readq(host->fifo_reg);
+ buf = pdata;
+ }
+ if (cnt) {
+ host->part_buf = mci_fifo_l_readq(host->fifo_reg);
+ dw_mci_pull_final_bytes(host, buf, cnt);
+ }
+}
+
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
int len;
@@ -2591,7 +2788,7 @@ done:
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
- del_timer(&host->cto_timer);
+ timer_delete(&host->cto_timer);
if (!host->cmd_status)
host->cmd_status = status;
@@ -2599,7 +2796,9 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
smp_wmb(); /* drain writebuffer */
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
+
+ dw_mci_start_fault_timer(host);
}
static void dw_mci_handle_cd(struct dw_mci *host)
@@ -2633,13 +2832,13 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
dw_mci_cmd_interrupt(host, pending);
spin_unlock(&host->irq_lock);
- del_timer(&host->cmd11_timer);
+ timer_delete(&host->cmd11_timer);
}
if (pending & DW_MCI_CMD_ERROR_FLAGS) {
spin_lock(&host->irq_lock);
- del_timer(&host->cto_timer);
+ timer_delete(&host->cto_timer);
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
host->cmd_status = pending;
smp_wmb(); /* drain writebuffer */
@@ -2649,18 +2848,31 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending & DW_MCI_DATA_ERROR_FLAGS) {
+ spin_lock(&host->irq_lock);
+
+ if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
+ timer_delete(&host->dto_timer);
+
/* if there is an error report DATA_ERROR */
mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
host->data_status = pending;
smp_wmb(); /* drain writebuffer */
set_bit(EVENT_DATA_ERROR, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+
+ if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
+ /* In case of error, we cannot expect a DTO */
+ set_bit(EVENT_DATA_COMPLETE,
+ &host->pending_events);
+
+ queue_work(system_bh_wq, &host->bh_work);
+
+ spin_unlock(&host->irq_lock);
}
if (pending & SDMMC_INT_DATA_OVER) {
spin_lock(&host->irq_lock);
- del_timer(&host->dto_timer);
+ timer_delete(&host->dto_timer);
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
if (!host->data_status)
@@ -2671,7 +2883,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
dw_mci_read_data_pio(host, true);
}
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
spin_unlock(&host->irq_lock);
}
@@ -2751,6 +2963,9 @@ static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
if (host->pdata->pm_caps)
mmc->pm_caps = host->pdata->pm_caps;
+ if (drv_data)
+ mmc->caps |= drv_data->common_caps;
+
if (host->dev->of_node) {
ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
if (ctrl_id < 0)
@@ -2771,7 +2986,12 @@ static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
if (host->pdata->caps2)
mmc->caps2 = host->pdata->caps2;
- mmc->f_min = DW_MCI_FREQ_MIN;
+ /* if the host has set a minimum_speed, we should respect it */
+ if (host->minimum_speed)
+ mmc->f_min = host->minimum_speed;
+ else
+ mmc->f_min = DW_MCI_FREQ_MIN;
+
if (!mmc->f_max)
mmc->f_max = DW_MCI_FREQ_MAX;
@@ -2788,7 +3008,7 @@ static int dw_mci_init_slot(struct dw_mci *host)
struct dw_mci_slot *slot;
int ret;
- mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
+ mmc = devm_mmc_alloc_host(host->dev, sizeof(*slot));
if (!mmc)
return -ENOMEM;
@@ -2804,18 +3024,18 @@ static int dw_mci_init_slot(struct dw_mci *host)
/*if there are external regulators, get them*/
ret = mmc_regulator_get_supply(mmc);
if (ret)
- goto err_host_allocated;
+ return ret;
if (!mmc->ocr_avail)
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
ret = mmc_of_parse(mmc);
if (ret)
- goto err_host_allocated;
+ return ret;
ret = dw_mci_init_slot_caps(slot);
if (ret)
- goto err_host_allocated;
+ return ret;
/* Useful defaults if platform data is unset. */
if (host->use_dma == TRANS_MODE_IDMAC) {
@@ -2845,17 +3065,13 @@ static int dw_mci_init_slot(struct dw_mci *host)
ret = mmc_add_host(mmc);
if (ret)
- goto err_host_allocated;
+ return ret;
#if defined(CONFIG_DEBUG_FS)
dw_mci_init_debugfs(slot);
#endif
return 0;
-
-err_host_allocated:
- mmc_free_host(mmc);
- return ret;
}
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
@@ -2863,7 +3079,6 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
/* Debugfs stuff is cleaned up by mmc core */
mmc_remove_host(slot->mmc);
slot->host->slot = NULL;
- mmc_free_host(slot->mmc);
}
static void dw_mci_init_dma(struct dw_mci *host)
@@ -2905,9 +3120,8 @@ static void dw_mci_init_dma(struct dw_mci *host)
host->dma_64bit_address = 1;
dev_info(host->dev,
"IDMAC supports 64-bit address mode.\n");
- if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
- dma_set_coherent_mask(host->dev,
- DMA_BIT_MASK(64));
+ if (dma_set_mask_and_coherent(host->dev, DMA_BIT_MASK(64)))
+ dev_info(host->dev, "Fail to set 64-bit DMA mask");
} else {
/* host supports IDMAC in 32-bit address mode */
host->dma_64bit_address = 0;
@@ -2930,8 +3144,7 @@ static void dw_mci_init_dma(struct dw_mci *host)
dev_info(host->dev, "Using internal DMA controller.\n");
} else {
/* TRANS_MODE_EDMAC: check dma bindings again */
- if ((device_property_read_string_array(dev, "dma-names",
- NULL, 0) < 0) ||
+ if ((device_property_string_array_count(dev, "dma-names") < 0) ||
!device_property_present(dev, "dmas")) {
goto no_dma;
}
@@ -2960,7 +3173,7 @@ no_dma:
static void dw_mci_cmd11_timer(struct timer_list *t)
{
- struct dw_mci *host = from_timer(host, t, cmd11_timer);
+ struct dw_mci *host = timer_container_of(host, t, cmd11_timer);
if (host->state != STATE_SENDING_CMD11) {
dev_warn(host->dev, "Unexpected CMD11 timeout\n");
@@ -2969,12 +3182,12 @@ static void dw_mci_cmd11_timer(struct timer_list *t)
host->cmd_status = SDMMC_INT_RTO;
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
static void dw_mci_cto_timer(struct timer_list *t)
{
- struct dw_mci *host = from_timer(host, t, cto_timer);
+ struct dw_mci *host = timer_container_of(host, t, cto_timer);
unsigned long irqflags;
u32 pending;
@@ -3015,7 +3228,7 @@ static void dw_mci_cto_timer(struct timer_list *t)
*/
host->cmd_status = SDMMC_INT_RTO;
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
break;
default:
dev_warn(host->dev, "Unexpected command timeout, state %d\n",
@@ -3029,7 +3242,7 @@ exit:
static void dw_mci_dto_timer(struct timer_list *t)
{
- struct dw_mci *host = from_timer(host, t, dto_timer);
+ struct dw_mci *host = timer_container_of(host, t, dto_timer);
unsigned long irqflags;
u32 pending;
@@ -3066,7 +3279,7 @@ static void dw_mci_dto_timer(struct timer_list *t)
host->data_status = SDMMC_INT_DRTO;
set_bit(EVENT_DATA_ERROR, &host->pending_events);
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
break;
default:
dev_warn(host->dev, "Unexpected data timeout, state %d\n",
@@ -3164,6 +3377,10 @@ int dw_mci_probe(struct dw_mci *host)
host->biu_clk = devm_clk_get(host->dev, "biu");
if (IS_ERR(host->biu_clk)) {
dev_dbg(host->dev, "biu clock not available\n");
+ ret = PTR_ERR(host->biu_clk);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
} else {
ret = clk_prepare_enable(host->biu_clk);
if (ret) {
@@ -3175,6 +3392,10 @@ int dw_mci_probe(struct dw_mci *host)
host->ciu_clk = devm_clk_get(host->dev, "ciu");
if (IS_ERR(host->ciu_clk)) {
dev_dbg(host->dev, "ciu clock not available\n");
+ ret = PTR_ERR(host->ciu_clk);
+ if (ret == -EPROBE_DEFER)
+ goto err_clk_biu;
+
host->bus_hz = host->pdata->bus_hz;
} else {
ret = clk_prepare_enable(host->ciu_clk);
@@ -3223,6 +3444,8 @@ int dw_mci_probe(struct dw_mci *host)
spin_lock_init(&host->irq_lock);
INIT_LIST_HEAD(&host->queue);
+ dw_mci_init_fault(host);
+
/*
* Get the host data width - this assumes that HCON has been set with
* the correct values.
@@ -3234,8 +3457,13 @@ int dw_mci_probe(struct dw_mci *host)
width = 16;
host->data_shift = 1;
} else if (i == 2) {
- host->push_data = dw_mci_push_data64;
- host->pull_data = dw_mci_pull_data64;
+ if ((host->quirks & DW_MMC_QUIRK_FIFO64_32)) {
+ host->push_data = dw_mci_push_data64_32;
+ host->pull_data = dw_mci_pull_data64_32;
+ } else {
+ host->push_data = dw_mci_push_data64;
+ host->pull_data = dw_mci_pull_data64;
+ }
width = 64;
host->data_shift = 3;
} else {
@@ -3304,7 +3532,7 @@ int dw_mci_probe(struct dw_mci *host)
else
host->fifo_reg = host->regs + DATA_240A_OFFSET;
- tasklet_setup(&host->tasklet, dw_mci_tasklet_func);
+ INIT_WORK(&host->bh_work, dw_mci_work_func);
ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
host->irq_flags, "dw-mci", host);
if (ret)
@@ -3388,7 +3616,7 @@ int dw_mci_runtime_suspend(struct device *dev)
clk_disable_unprepare(host->ciu_clk);
if (host->slot &&
- (mmc_can_gpio_cd(host->slot->mmc) ||
+ (mmc_host_can_gpio_cd(host->slot->mmc) ||
!mmc_card_is_removable(host->slot->mmc)))
clk_disable_unprepare(host->biu_clk);
@@ -3402,7 +3630,7 @@ int dw_mci_runtime_resume(struct device *dev)
struct dw_mci *host = dev_get_drvdata(dev);
if (host->slot &&
- (mmc_can_gpio_cd(host->slot->mmc) ||
+ (mmc_host_can_gpio_cd(host->slot->mmc) ||
!mmc_card_is_removable(host->slot->mmc))) {
ret = clk_prepare_enable(host->biu_clk);
if (ret)
@@ -3439,7 +3667,7 @@ int dw_mci_runtime_resume(struct device *dev)
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
- if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
+ if (host->slot && host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
/* Force setup bus to guarantee available clock output */
@@ -3456,7 +3684,7 @@ int dw_mci_runtime_resume(struct device *dev)
err:
if (host->slot &&
- (mmc_can_gpio_cd(host->slot->mmc) ||
+ (mmc_host_can_gpio_cd(host->slot->mmc) ||
!mmc_card_is_removable(host->slot->mmc)))
clk_disable_unprepare(host->biu_clk);
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index da5923a92e60..648b4a5641bf 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -14,7 +14,10 @@
#include <linux/mmc/core.h>
#include <linux/dmaengine.h>
#include <linux/reset.h>
+#include <linux/fault-inject.h>
+#include <linux/hrtimer.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
enum dw_mci_state {
STATE_IDLE = 0,
@@ -87,16 +90,17 @@ struct dw_mci_dma_slave {
* @stop_cmdr: Value to be loaded into CMDR when the stop command is
* to be sent.
* @dir_status: Direction of current transfer.
- * @tasklet: Tasklet running the request state machine.
+ * @bh_work: Work running the request state machine.
* @pending_events: Bitmask of events flagged by the interrupt handler
- * to be processed by the tasklet.
+ * to be processed by bh work.
* @completed_events: Bitmask of events which the state machine has
* processed.
- * @state: Tasklet state.
+ * @state: BH work state.
* @queue: List of slots waiting for access to the controller.
* @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
* rate and timeout calculations.
* @current_speed: Configured rate of the controller.
+ * @minimum_speed: Stored minimum rate of the controller.
* @fifoth_val: The value of FIFOTH register.
* @verid: Denote Version ID.
* @dev: Device associated with the MMC controller.
@@ -116,6 +120,7 @@ struct dw_mci_dma_slave {
* @part_buf: Simple buffer for partial fifo reads/writes.
* @push_data: Pointer to FIFO push function.
* @pull_data: Pointer to FIFO pull function.
+ * @quirks: Set of quirks that apply to specific versions of the IP.
* @vqmmc_enabled: Status of vqmmc, should be true or false.
* @irq_flags: The flags to be passed to request_irq.
* @irq: The irq value to be passed to request_irq.
@@ -190,7 +195,7 @@ struct dw_mci {
u32 data_status;
u32 stop_cmdr;
u32 dir_status;
- struct tasklet_struct tasklet;
+ struct work_struct bh_work;
unsigned long pending_events;
unsigned long completed_events;
enum dw_mci_state state;
@@ -198,6 +203,7 @@ struct dw_mci {
u32 bus_hz;
u32 current_speed;
+ u32 minimum_speed;
u32 fifoth_val;
u16 verid;
struct device *dev;
@@ -221,6 +227,7 @@ struct dw_mci {
void (*push_data)(struct dw_mci *host, void *buf, int cnt);
void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
+ u32 quirks;
bool vqmmc_enabled;
unsigned long irq_flags; /* IRQ flags */
int irq;
@@ -230,6 +237,11 @@ struct dw_mci {
struct timer_list cmd11_timer;
struct timer_list cto_timer;
struct timer_list dto_timer;
+
+#ifdef CONFIG_FAULT_INJECTION
+ struct fault_attr fail_data_crc;
+ struct hrtimer fault_timer;
+#endif
};
/* DMA ops for Internal/External DMAC interface */
@@ -267,6 +279,11 @@ struct dw_mci_board {
struct dma_pdata *data;
};
+/* Support for longer data read timeout */
+#define DW_MMC_QUIRK_EXTENDED_TMOUT BIT(0)
+/* Force 32-bit access to the FIFO */
+#define DW_MMC_QUIRK_FIFO64_32 BIT(1)
+
#define DW_MMC_240A 0x240a
#define DW_MMC_280A 0x280a
@@ -457,6 +474,31 @@ struct dw_mci_board {
#define mci_fifo_writel(__value, __reg) __raw_writel(__reg, __value)
#define mci_fifo_writeq(__value, __reg) __raw_writeq(__reg, __value)
+/*
+ * Some dw_mmc devices have 64-bit FIFOs, but expect them to be
+ * accessed using two 32-bit accesses. If such a controller is used
+ * with a 64-bit kernel, this has to be done explicitly.
+ */
+static inline u64 mci_fifo_l_readq(void __iomem *addr)
+{
+ u64 ans;
+ u32 proxy[2];
+
+ proxy[0] = mci_fifo_readl(addr);
+ proxy[1] = mci_fifo_readl(addr + 4);
+ memcpy(&ans, proxy, 8);
+ return ans;
+}
+
+static inline void mci_fifo_l_writeq(void __iomem *addr, u64 value)
+{
+ u32 proxy[2];
+
+ memcpy(proxy, &value, 8);
+ mci_fifo_writel(addr, proxy[0]);
+ mci_fifo_writel(addr + 4, proxy[1]);
+}
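Going through a u32[2] proxy with memcpy() keeps the two 32-bit accesses in ascending address order while preserving the CPU's native byte order, so the same helpers work on either endianness. A standalone userspace sketch of the idea, with a plain array standing in for the memory-mapped FIFO:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t fifo[2];	/* stand-in for the 64-bit wide FIFO register */

static void fifo_writeq(uint64_t value)
{
	uint32_t half[2];

	memcpy(half, &value, 8);
	fifo[0] = half[0];	/* first 32-bit access */
	fifo[1] = half[1];	/* second 32-bit access */
}

static uint64_t fifo_readq(void)
{
	uint64_t value;

	memcpy(&value, fifo, 8);
	return value;
}

int main(void)
{
	fifo_writeq(0x1122334455667788ULL);
	printf("0x%016llx\n", (unsigned long long)fifo_readq());	/* round-trips the value */
	return 0;
}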
+
/* Register access macros */
#define mci_readl(dev, reg) \
readl_relaxed((dev)->regs + SDMMC_##reg)
@@ -499,6 +541,9 @@ extern void dw_mci_remove(struct dw_mci *host);
#ifdef CONFIG_PM
extern int dw_mci_runtime_suspend(struct device *device);
extern int dw_mci_runtime_resume(struct device *device);
+#else
+static inline int dw_mci_runtime_suspend(struct device *device) { return -EOPNOTSUPP; }
+static inline int dw_mci_runtime_resume(struct device *device) { return -EOPNOTSUPP; }
#endif
/**
@@ -543,10 +588,15 @@ struct dw_mci_slot {
* dw_mci driver data - dw-mshc implementation specific driver data.
* @caps: mmc subsystem specified capabilities of the controller(s).
* @num_caps: number of capabilities specified by @caps.
+ * @common_caps: mmc subsystem specified capabilities applicable to all of
+ * the controllers.
* @init: early implementation specific initialization.
* @set_ios: handle bus specific extensions.
* @parse_dt: parse implementation specific device tree properties.
* @execute_tuning: implementation specific tuning procedure.
+ * @set_data_timeout: implementation specific way to set the data timeout.
+ * @get_drto_clks: implementation specific cycle count for data read timeout.
+ * @hw_reset: implementation specific HW reset.
*
* Provide controller implementation specific extensions. The usage of this
* data structure is fully optional and usage of each member in this structure
@@ -555,6 +605,7 @@ struct dw_mci_slot {
struct dw_mci_drv_data {
unsigned long *caps;
u32 num_caps;
+ u32 common_caps;
int (*init)(struct dw_mci *host);
void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
int (*parse_dt)(struct dw_mci *host);
@@ -563,5 +614,9 @@ struct dw_mci_drv_data {
struct mmc_ios *ios);
int (*switch_voltage)(struct mmc_host *mmc,
struct mmc_ios *ios);
+ void (*set_data_timeout)(struct dw_mci *host,
+ unsigned int timeout_ns);
+ u32 (*get_drto_clks)(struct dw_mci *host);
+ void (*hw_reset)(struct dw_mci *host);
};
#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 0db17bcc9c16..6a0d0250d47b 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -18,9 +18,11 @@
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
@@ -158,6 +160,8 @@ struct jz4740_mmc_host {
struct mmc_request *req;
struct mmc_command *cmd;
+ bool vqmmc_enabled;
+
unsigned long waiting;
uint32_t cmdat;
@@ -217,11 +221,23 @@ static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
return;
dma_release_channel(host->dma_tx);
- dma_release_channel(host->dma_rx);
+ if (host->dma_rx)
+ dma_release_channel(host->dma_rx);
}
static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
{
+ struct device *dev = mmc_dev(host->mmc);
+
+ host->dma_tx = dma_request_chan(dev, "tx-rx");
+ if (!IS_ERR(host->dma_tx))
+ return 0;
+
+ if (PTR_ERR(host->dma_tx) != -ENODEV) {
+ dev_err(dev, "Failed to get dma tx-rx channel\n");
+ return PTR_ERR(host->dma_tx);
+ }
+
host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
if (IS_ERR(host->dma_tx)) {
dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
@@ -235,13 +251,36 @@ static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
return PTR_ERR(host->dma_rx);
}
+ /*
+ * Limit the maximum segment size in any SG entry according to
+ * the parameters of the DMA engine device.
+ */
+ if (host->dma_tx) {
+ struct device *dev = host->dma_tx->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+
+ if (host->dma_rx) {
+ struct device *dev = host->dma_rx->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+
return 0;
}
static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
struct mmc_data *data)
{
- return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx;
+ if ((data->flags & MMC_DATA_READ) && host->dma_rx)
+ return host->dma_rx;
+ else
+ return host->dma_tx;
}
static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
@@ -263,7 +302,7 @@ static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
{
struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
enum dma_data_direction dir = mmc_get_dma_dir(data);
- int sg_count;
+ unsigned int sg_count;
if (data->host_cookie == COOKIE_PREMAPPED)
return data->sg_count;
@@ -273,7 +312,7 @@ static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
data->sg_len,
dir);
- if (sg_count <= 0) {
+ if (!sg_count) {
dev_err(mmc_dev(host->mmc),
"Failed to map scatterlist for DMA operation\n");
return -EINVAL;
@@ -578,10 +617,6 @@ static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
}
}
data->bytes_xfered += miter->length;
-
- /* This can go away once MIPS implements
- * flush_kernel_dcache_page */
- flush_dcache_page(miter->page);
}
sg_miter_stop(miter);
@@ -606,7 +641,8 @@ poll_timeout:
static void jz4740_mmc_timeout(struct timer_list *t)
{
- struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);
+ struct jz4740_mmc_host *host = timer_container_of(host, t,
+ timeout_timer);
if (!test_and_clear_bit(0, &host->waiting))
return;
@@ -789,6 +825,8 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
break;
}
}
+ fallthrough;
+
case JZ4740_MMC_STATE_DONE:
break;
}
@@ -825,7 +863,7 @@ static irqreturn_t jz_mmc_irq(int irq, void *devid)
if (host->req && cmd && irq_reg) {
if (test_and_clear_bit(0, &host->waiting)) {
- del_timer(&host->timeout_timer);
+ timer_delete(&host->timeout_timer);
if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
cmd->error = -ETIMEDOUT;
@@ -902,6 +940,8 @@ static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct jz4740_mmc_host *host = mmc_priv(mmc);
+ int ret;
+
if (ios->clock)
jz4740_mmc_set_clock_rate(host, ios->clock);
@@ -914,12 +954,25 @@ static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
clk_prepare_enable(host->clk);
break;
case MMC_POWER_ON:
+ if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
+ ret = regulator_enable(mmc->supply.vqmmc);
+ if (ret)
+ dev_err(&host->pdev->dev, "Failed to set vqmmc power!\n");
+ else
+ host->vqmmc_enabled = true;
+ }
break;
- default:
+ case MMC_POWER_OFF:
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
+ regulator_disable(mmc->supply.vqmmc);
+ host->vqmmc_enabled = false;
+ }
clk_disable_unprepare(host->clk);
break;
+ default:
+ break;
}
switch (ios->bus_width) {
@@ -945,6 +998,23 @@ static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
}
+static int jz4740_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ int ret;
+
+ /* vqmmc regulator is available */
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
+ return ret < 0 ? ret : 0;
+ }
+
+ /* no vqmmc regulator, assume fixed regulator at 3/3.3V */
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+ return 0;
+
+ return -EINVAL;
+}
+
static const struct mmc_host_ops jz4740_mmc_ops = {
.request = jz4740_mmc_request,
.pre_req = jz4740_mmc_pre_request,
@@ -953,6 +1023,7 @@ static const struct mmc_host_ops jz4740_mmc_ops = {
.get_ro = mmc_gpio_get_ro,
.get_cd = mmc_gpio_get_cd,
.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
+ .start_signal_voltage_switch = jz4740_voltage_switch,
};
static const struct of_device_id jz4740_mmc_of_match[] = {
@@ -971,9 +1042,8 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
int ret;
struct mmc_host *mmc;
struct jz4740_mmc_host *host;
- const struct of_device_id *match;
- mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host));
if (!mmc) {
dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
return -ENOMEM;
@@ -981,45 +1051,42 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
host = mmc_priv(mmc);
- match = of_match_device(jz4740_mmc_of_match, &pdev->dev);
- if (match) {
- host->version = (enum jz4740_mmc_version)match->data;
- } else {
- /* JZ4740 should be the only one using legacy probe */
- host->version = JZ_MMC_JZ4740;
- }
+ /* Default to JZ4740 if there is no match */
+ host->version = (enum jz4740_mmc_version)device_get_match_data(&pdev->dev);
ret = mmc_of_parse(mmc);
- if (ret) {
- dev_err_probe(&pdev->dev, ret, "could not parse device properties\n");
- goto err_free_host;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "could not parse device properties\n");
mmc_regulator_get_supply(mmc);
host->irq = platform_get_irq(pdev, 0);
- if (host->irq < 0) {
- ret = host->irq;
- goto err_free_host;
- }
+ if (host->irq < 0)
+ return host->irq;
host->clk = devm_clk_get(&pdev->dev, "mmc");
- if (IS_ERR(host->clk)) {
- ret = PTR_ERR(host->clk);
- dev_err(&pdev->dev, "Failed to get mmc clock\n");
- goto err_free_host;
- }
+ if (IS_ERR(host->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(host->clk),
+ "Failed to get mmc clock\n");
- host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
- if (IS_ERR(host->base)) {
- ret = PTR_ERR(host->base);
- goto err_free_host;
- }
+ host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &host->mem_res);
+ if (IS_ERR(host->base))
+ return PTR_ERR(host->base);
mmc->ops = &jz4740_mmc_ops;
if (!mmc->f_max)
mmc->f_max = JZ_MMC_CLK_RATE;
+
+ /*
+ * There seems to be a problem with this driver on the JZ4760 and
+ * JZ4760B SoCs. There, when using the maximum rate supported (50 MHz),
+ * the communication fails with many SD cards.
+ * Until this bug is sorted out, limit the maximum rate to 24 MHz.
+ */
+ if (host->version == JZ_MMC_JZ4760 && mmc->f_max > JZ_MMC_CLK_RATE)
+ mmc->f_max = JZ_MMC_CLK_RATE;
+
mmc->f_min = mmc->f_max / 128;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
@@ -1045,10 +1112,8 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
dev_name(&pdev->dev), host);
- if (ret) {
- dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
- goto err_free_host;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to request irq\n");
jz4740_mmc_clock_disable(host);
timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);
@@ -1079,17 +1144,14 @@ err_release_dma:
jz4740_mmc_release_dma_channels(host);
err_free_irq:
free_irq(host->irq, host);
-err_free_host:
- mmc_free_host(mmc);
-
return ret;
}
-static int jz4740_mmc_remove(struct platform_device *pdev)
+static void jz4740_mmc_remove(struct platform_device *pdev)
{
struct jz4740_mmc_host *host = platform_get_drvdata(pdev);
- del_timer_sync(&host->timeout_timer);
+ timer_delete_sync(&host->timeout_timer);
jz4740_mmc_set_irq_enabled(host, 0xff, false);
jz4740_mmc_reset(host);
@@ -1099,24 +1161,20 @@ static int jz4740_mmc_remove(struct platform_device *pdev)
if (host->use_dma)
jz4740_mmc_release_dma_channels(host);
-
- mmc_free_host(host->mmc);
-
- return 0;
}
-static int __maybe_unused jz4740_mmc_suspend(struct device *dev)
+static int jz4740_mmc_suspend(struct device *dev)
{
return pinctrl_pm_select_sleep_state(dev);
}
-static int __maybe_unused jz4740_mmc_resume(struct device *dev)
+static int jz4740_mmc_resume(struct device *dev)
{
return pinctrl_select_default_state(dev);
}
-static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
- jz4740_mmc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
+ jz4740_mmc_resume);
static struct platform_driver jz4740_mmc_driver = {
.probe = jz4740_mmc_probe,
@@ -1124,8 +1182,8 @@ static struct platform_driver jz4740_mmc_driver = {
.driver = {
.name = "jz4740-mmc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .of_match_table = of_match_ptr(jz4740_mmc_of_match),
- .pm = pm_ptr(&jz4740_mmc_pm_ops),
+ .of_match_table = jz4740_mmc_of_match,
+ .pm = pm_sleep_ptr(&jz4740_mmc_pm_ops),
},
};
diff --git a/drivers/mmc/host/litex_mmc.c b/drivers/mmc/host/litex_mmc.c
new file mode 100644
index 000000000000..d2f19c2dc673
--- /dev/null
+++ b/drivers/mmc/host/litex_mmc.c
@@ -0,0 +1,652 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LiteX LiteSDCard driver
+ *
+ * Copyright (C) 2019-2020 Antmicro <contact@antmicro.com>
+ * Copyright (C) 2019-2020 Kamil Rakoczy <krakoczy@antmicro.com>
+ * Copyright (C) 2019-2020 Maciej Dudek <mdudek@internships.antmicro.com>
+ * Copyright (C) 2020 Paul Mackerras <paulus@ozlabs.org>
+ * Copyright (C) 2020-2022 Gabriel Somlo <gsomlo@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/litex.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#define LITEX_PHY_CARDDETECT 0x00
+#define LITEX_PHY_CLOCKERDIV 0x04
+#define LITEX_PHY_INITIALIZE 0x08
+#define LITEX_PHY_WRITESTATUS 0x0C
+#define LITEX_CORE_CMDARG 0x00
+#define LITEX_CORE_CMDCMD 0x04
+#define LITEX_CORE_CMDSND 0x08
+#define LITEX_CORE_CMDRSP 0x0C
+#define LITEX_CORE_CMDEVT 0x1C
+#define LITEX_CORE_DATEVT 0x20
+#define LITEX_CORE_BLKLEN 0x24
+#define LITEX_CORE_BLKCNT 0x28
+#define LITEX_BLK2MEM_BASE 0x00
+#define LITEX_BLK2MEM_LEN 0x08
+#define LITEX_BLK2MEM_ENA 0x0C
+#define LITEX_BLK2MEM_DONE 0x10
+#define LITEX_BLK2MEM_LOOP 0x14
+#define LITEX_MEM2BLK_BASE 0x00
+#define LITEX_MEM2BLK_LEN 0x08
+#define LITEX_MEM2BLK_ENA 0x0C
+#define LITEX_MEM2BLK_DONE 0x10
+#define LITEX_MEM2BLK_LOOP 0x14
+#define LITEX_MEM2BLK 0x18
+#define LITEX_IRQ_STATUS 0x00
+#define LITEX_IRQ_PENDING 0x04
+#define LITEX_IRQ_ENABLE 0x08
+
+#define SD_CTL_DATA_XFER_NONE 0
+#define SD_CTL_DATA_XFER_READ 1
+#define SD_CTL_DATA_XFER_WRITE 2
+
+#define SD_CTL_RESP_NONE 0
+#define SD_CTL_RESP_SHORT 1
+#define SD_CTL_RESP_LONG 2
+#define SD_CTL_RESP_SHORT_BUSY 3
+
+#define SD_BIT_DONE BIT(0)
+#define SD_BIT_WR_ERR BIT(1)
+#define SD_BIT_TIMEOUT BIT(2)
+#define SD_BIT_CRC_ERR BIT(3)
+
+#define SD_SLEEP_US 5
+#define SD_TIMEOUT_US 20000
+
+#define SDIRQ_CARD_DETECT 1
+#define SDIRQ_SD_TO_MEM_DONE 2
+#define SDIRQ_MEM_TO_SD_DONE 4
+#define SDIRQ_CMD_DONE 8
+
+struct litex_mmc_host {
+ struct mmc_host *mmc;
+
+ void __iomem *sdphy;
+ void __iomem *sdcore;
+ void __iomem *sdreader;
+ void __iomem *sdwriter;
+ void __iomem *sdirq;
+
+ void *buffer;
+ size_t buf_size;
+ dma_addr_t dma;
+
+ struct completion cmd_done;
+ int irq;
+
+ unsigned int ref_clk;
+ unsigned int sd_clk;
+
+ u32 resp[4];
+ u16 rca;
+
+ bool is_bus_width_set;
+ bool app_cmd;
+};
+
+static int litex_mmc_sdcard_wait_done(void __iomem *reg, struct device *dev)
+{
+ u8 evt;
+ int ret;
+
+ ret = readx_poll_timeout(litex_read8, reg, evt, evt & SD_BIT_DONE,
+ SD_SLEEP_US, SD_TIMEOUT_US);
+ if (ret)
+ return ret;
+ if (evt == SD_BIT_DONE)
+ return 0;
+ if (evt & SD_BIT_WR_ERR)
+ return -EIO;
+ if (evt & SD_BIT_TIMEOUT)
+ return -ETIMEDOUT;
+ if (evt & SD_BIT_CRC_ERR)
+ return -EILSEQ;
+ dev_err(dev, "%s: unknown error (evt=%x)\n", __func__, evt);
+ return -EINVAL;
+}
+
+static int litex_mmc_send_cmd(struct litex_mmc_host *host,
+ u8 cmd, u32 arg, u8 response_len, u8 transfer)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ void __iomem *reg;
+ int ret;
+ u8 evt;
+
+ litex_write32(host->sdcore + LITEX_CORE_CMDARG, arg);
+ litex_write32(host->sdcore + LITEX_CORE_CMDCMD,
+ cmd << 8 | transfer << 5 | response_len);
+ litex_write8(host->sdcore + LITEX_CORE_CMDSND, 1);
+
+ /*
+ * Wait for an interrupt if we have an interrupt and either there is
+ * data to be transferred, or if the card can report busy via DAT0.
+ */
+ if (host->irq > 0 &&
+ (transfer != SD_CTL_DATA_XFER_NONE ||
+ response_len == SD_CTL_RESP_SHORT_BUSY)) {
+ reinit_completion(&host->cmd_done);
+ litex_write32(host->sdirq + LITEX_IRQ_ENABLE,
+ SDIRQ_CMD_DONE | SDIRQ_CARD_DETECT);
+ wait_for_completion(&host->cmd_done);
+ }
+
+ ret = litex_mmc_sdcard_wait_done(host->sdcore + LITEX_CORE_CMDEVT, dev);
+ if (ret) {
+ dev_err(dev, "Command (cmd %d) error, status %d\n", cmd, ret);
+ return ret;
+ }
+
+ if (response_len != SD_CTL_RESP_NONE) {
+ /*
+ * NOTE: this matches the semantics of litex_read32()
+ * regardless of underlying arch endianness!
+ */
+ memcpy_fromio(host->resp,
+ host->sdcore + LITEX_CORE_CMDRSP, 0x10);
+ }
+
+ if (!host->app_cmd && cmd == SD_SEND_RELATIVE_ADDR)
+ host->rca = (host->resp[3] >> 16);
+
+ host->app_cmd = (cmd == MMC_APP_CMD);
+
+ if (transfer == SD_CTL_DATA_XFER_NONE)
+ return ret; /* OK from prior litex_mmc_sdcard_wait_done() */
+
+ ret = litex_mmc_sdcard_wait_done(host->sdcore + LITEX_CORE_DATEVT, dev);
+ if (ret) {
+ dev_err(dev, "Data xfer (cmd %d) error, status %d\n", cmd, ret);
+ return ret;
+ }
+
+ /* Wait for completion of (read or write) DMA transfer */
+ reg = (transfer == SD_CTL_DATA_XFER_READ) ?
+ host->sdreader + LITEX_BLK2MEM_DONE :
+ host->sdwriter + LITEX_MEM2BLK_DONE;
+ ret = readx_poll_timeout(litex_read8, reg, evt, evt & SD_BIT_DONE,
+ SD_SLEEP_US, SD_TIMEOUT_US);
+ if (ret)
+ dev_err(dev, "DMA timeout (cmd %d)\n", cmd);
+
+ return ret;
+}
+
+static int litex_mmc_send_app_cmd(struct litex_mmc_host *host)
+{
+ return litex_mmc_send_cmd(host, MMC_APP_CMD, host->rca << 16,
+ SD_CTL_RESP_SHORT, SD_CTL_DATA_XFER_NONE);
+}
+
+static int litex_mmc_send_set_bus_w_cmd(struct litex_mmc_host *host, u32 width)
+{
+ return litex_mmc_send_cmd(host, SD_APP_SET_BUS_WIDTH, width,
+ SD_CTL_RESP_SHORT, SD_CTL_DATA_XFER_NONE);
+}
+
+static int litex_mmc_set_bus_width(struct litex_mmc_host *host)
+{
+ bool app_cmd_sent;
+ int ret;
+
+ if (host->is_bus_width_set)
+ return 0;
+
+ /* Ensure 'app_cmd' precedes 'app_set_bus_width_cmd' */
+ app_cmd_sent = host->app_cmd; /* was preceding command app_cmd? */
+ if (!app_cmd_sent) {
+ ret = litex_mmc_send_app_cmd(host);
+ if (ret)
+ return ret;
+ }
+
+ /* LiteSDCard only supports 4-bit bus width */
+ ret = litex_mmc_send_set_bus_w_cmd(host, MMC_BUS_WIDTH_4);
+ if (ret)
+ return ret;
+
+ /* Re-send 'app_cmd' if necessary */
+ if (app_cmd_sent) {
+ ret = litex_mmc_send_app_cmd(host);
+ if (ret)
+ return ret;
+ }
+
+ host->is_bus_width_set = true;
+
+ return 0;
+}
+
+static int litex_mmc_get_cd(struct mmc_host *mmc)
+{
+ struct litex_mmc_host *host = mmc_priv(mmc);
+ int ret;
+
+ if (!mmc_card_is_removable(mmc))
+ return 1;
+
+ ret = !litex_read8(host->sdphy + LITEX_PHY_CARDDETECT);
+ if (ret)
+ return ret;
+
+ /* Ensure bus width will be set (again) upon card (re)insertion */
+ host->is_bus_width_set = false;
+
+ return 0;
+}
+
+static irqreturn_t litex_mmc_interrupt(int irq, void *arg)
+{
+ struct mmc_host *mmc = arg;
+ struct litex_mmc_host *host = mmc_priv(mmc);
+ u32 pending = litex_read32(host->sdirq + LITEX_IRQ_PENDING);
+ irqreturn_t ret = IRQ_NONE;
+
+ /* Check for card change interrupt */
+ if (pending & SDIRQ_CARD_DETECT) {
+ litex_write32(host->sdirq + LITEX_IRQ_PENDING,
+ SDIRQ_CARD_DETECT);
+ mmc_detect_change(mmc, msecs_to_jiffies(10));
+ ret = IRQ_HANDLED;
+ }
+
+ /* Check for command completed */
+ if (pending & SDIRQ_CMD_DONE) {
+ /* Disable it so it doesn't keep interrupting */
+ litex_write32(host->sdirq + LITEX_IRQ_ENABLE,
+ SDIRQ_CARD_DETECT);
+ complete(&host->cmd_done);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static u32 litex_mmc_response_len(struct mmc_command *cmd)
+{
+ if (cmd->flags & MMC_RSP_136)
+ return SD_CTL_RESP_LONG;
+ if (!(cmd->flags & MMC_RSP_PRESENT))
+ return SD_CTL_RESP_NONE;
+ if (cmd->flags & MMC_RSP_BUSY)
+ return SD_CTL_RESP_SHORT_BUSY;
+ return SD_CTL_RESP_SHORT;
+}
+
+static void litex_mmc_do_dma(struct litex_mmc_host *host, struct mmc_data *data,
+ unsigned int *len, bool *direct, u8 *transfer)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ dma_addr_t dma;
+ int sg_count;
+
+ /*
+ * Try to DMA directly to/from the data buffer.
+ * We can do that if the buffer can be mapped for DMA
+ * in one contiguous chunk.
+ */
+ dma = host->dma;
+ *len = data->blksz * data->blocks;
+ sg_count = dma_map_sg(dev, data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ if (sg_count == 1) {
+ dma = sg_dma_address(data->sg);
+ *len = sg_dma_len(data->sg);
+ *direct = true;
+ } else if (*len > host->buf_size)
+ *len = host->buf_size;
+
+ if (data->flags & MMC_DATA_READ) {
+ litex_write8(host->sdreader + LITEX_BLK2MEM_ENA, 0);
+ litex_write64(host->sdreader + LITEX_BLK2MEM_BASE, dma);
+ litex_write32(host->sdreader + LITEX_BLK2MEM_LEN, *len);
+ litex_write8(host->sdreader + LITEX_BLK2MEM_ENA, 1);
+ *transfer = SD_CTL_DATA_XFER_READ;
+ } else if (data->flags & MMC_DATA_WRITE) {
+ if (!*direct)
+ sg_copy_to_buffer(data->sg, data->sg_len,
+ host->buffer, *len);
+ litex_write8(host->sdwriter + LITEX_MEM2BLK_ENA, 0);
+ litex_write64(host->sdwriter + LITEX_MEM2BLK_BASE, dma);
+ litex_write32(host->sdwriter + LITEX_MEM2BLK_LEN, *len);
+ litex_write8(host->sdwriter + LITEX_MEM2BLK_ENA, 1);
+ *transfer = SD_CTL_DATA_XFER_WRITE;
+ } else {
+ dev_warn(dev, "Data present w/o read or write flag.\n");
+ /* Continue: set cmd status, mark req done */
+ }
+
+ litex_write16(host->sdcore + LITEX_CORE_BLKLEN, data->blksz);
+ litex_write32(host->sdcore + LITEX_CORE_BLKCNT, data->blocks);
+}
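Whether the transfer goes straight to the request buffer or through the bounce buffer is decided purely by whether dma_map_sg() collapsed the scatterlist into a single segment. A userspace model of that decision, where sg_count, req_len and buf_size stand in for the real dma_map_sg() result, the request length and host->buf_size:

#include <stdio.h>
#include <stdbool.h>

static bool choose_direct_dma(int sg_count, unsigned int req_len,
			      unsigned int buf_size, unsigned int *xfer_len)
{
	if (sg_count == 1) {
		*xfer_len = req_len;	/* single contiguous mapping: DMA directly */
		return true;
	}
	/* bounce buffer: cap the transfer to what the buffer can hold */
	*xfer_len = req_len > buf_size ? buf_size : req_len;
	return false;
}

int main(void)
{
	unsigned int len;
	bool direct = choose_direct_dma(3, 64 * 1024, 16 * 1024, &len);

	printf("direct=%d len=%u\n", direct, len);	/* direct=0 len=16384 */
	return 0;
}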
+
+static void litex_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct litex_mmc_host *host = mmc_priv(mmc);
+ struct device *dev = mmc_dev(mmc);
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_command *sbc = mrq->sbc;
+ struct mmc_data *data = mrq->data;
+ struct mmc_command *stop = mrq->stop;
+ unsigned int retries = cmd->retries;
+ unsigned int len = 0;
+ bool direct = false;
+ u32 response_len = litex_mmc_response_len(cmd);
+ u8 transfer = SD_CTL_DATA_XFER_NONE;
+
+ /* First check that the card is still there */
+ if (!litex_mmc_get_cd(mmc)) {
+ cmd->error = -ENOMEDIUM;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ /* Send set-block-count command if needed */
+ if (sbc) {
+ sbc->error = litex_mmc_send_cmd(host, sbc->opcode, sbc->arg,
+ litex_mmc_response_len(sbc),
+ SD_CTL_DATA_XFER_NONE);
+ if (sbc->error) {
+ host->is_bus_width_set = false;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+ }
+
+ if (data) {
+ /*
+ * LiteSDCard only supports 4-bit bus width; therefore, we MUST
+ * inject a SET_BUS_WIDTH (acmd6) before the very first data
+ * transfer, earlier than when the mmc subsystem would normally
+ * get around to it!
+ */
+ cmd->error = litex_mmc_set_bus_width(host);
+ if (cmd->error) {
+ dev_err(dev, "Can't set bus width!\n");
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ litex_mmc_do_dma(host, data, &len, &direct, &transfer);
+ }
+
+ do {
+ cmd->error = litex_mmc_send_cmd(host, cmd->opcode, cmd->arg,
+ response_len, transfer);
+ } while (cmd->error && retries-- > 0);
+
+ if (cmd->error) {
+ /* Card may be gone; don't assume bus width is still set */
+ host->is_bus_width_set = false;
+ }
+
+ if (response_len == SD_CTL_RESP_SHORT) {
+ /* Pull short response fields from appropriate host registers */
+ cmd->resp[0] = host->resp[3];
+ cmd->resp[1] = host->resp[2] & 0xFF;
+ } else if (response_len == SD_CTL_RESP_LONG) {
+ cmd->resp[0] = host->resp[0];
+ cmd->resp[1] = host->resp[1];
+ cmd->resp[2] = host->resp[2];
+ cmd->resp[3] = host->resp[3];
+ }
+
+ /* Send stop-transmission command if required */
+ if (stop && (cmd->error || !sbc)) {
+ stop->error = litex_mmc_send_cmd(host, stop->opcode, stop->arg,
+ litex_mmc_response_len(stop),
+ SD_CTL_DATA_XFER_NONE);
+ if (stop->error)
+ host->is_bus_width_set = false;
+ }
+
+ if (data) {
+ dma_unmap_sg(dev, data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ }
+
+ if (!cmd->error && transfer != SD_CTL_DATA_XFER_NONE) {
+ data->bytes_xfered = min(len, mmc->max_req_size);
+ if (transfer == SD_CTL_DATA_XFER_READ && !direct) {
+ sg_copy_from_buffer(data->sg, sg_nents(data->sg),
+ host->buffer, data->bytes_xfered);
+ }
+ }
+
+ mmc_request_done(mmc, mrq);
+}
+
+static void litex_mmc_setclk(struct litex_mmc_host *host, unsigned int freq)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ u32 div;
+
+ div = freq ? host->ref_clk / freq : 256U;
+ div = roundup_pow_of_two(div);
+ div = clamp(div, 2U, 256U);
+ dev_dbg(dev, "sd_clk_freq=%d: set to %d via div=%d\n",
+ freq, host->ref_clk / div, div);
+ litex_write16(host->sdphy + LITEX_PHY_CLOCKERDIV, div);
+ host->sd_clk = freq;
+}
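Because the divider is rounded up to a power of two and clamped to [2, 256], the achieved clock can land well below the requested rate. A userspace sketch of the selection, assuming a 100 MHz reference clock (the real value comes from the clock feeding the gateware):

#include <stdio.h>
#include <stdint.h>

static uint32_t roundup_pow_of_two(uint32_t v)
{
	uint32_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t ref_clk = 100000000;	/* assumed reference clock */
	uint32_t freqs[] = { 400000, 25000000, 50000000 };

	for (int i = 0; i < 3; i++) {
		uint32_t div = freqs[i] ? ref_clk / freqs[i] : 256;

		div = roundup_pow_of_two(div);
		if (div < 2)
			div = 2;
		if (div > 256)
			div = 256;
		printf("requested %u Hz -> div %u -> actual %u Hz\n",
		       freqs[i], div, ref_clk / div);
	}
	return 0;
}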
+
+static void litex_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct litex_mmc_host *host = mmc_priv(mmc);
+
+ /*
+ * NOTE: Ignore any ios->bus_width updates; they occur right after
+ * the mmc core sends its own acmd6 bus-width change notification,
+ * which is redundant since we snoop on the command flow and inject
+ * an early acmd6 before the first data transfer command is sent!
+ */
+
+ /* Update sd_clk */
+ if (ios->clock != host->sd_clk)
+ litex_mmc_setclk(host, ios->clock);
+}
+
+static const struct mmc_host_ops litex_mmc_ops = {
+ .get_cd = litex_mmc_get_cd,
+ .request = litex_mmc_request,
+ .set_ios = litex_mmc_set_ios,
+};
+
+static int litex_mmc_irq_init(struct platform_device *pdev,
+ struct litex_mmc_host *host)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ int ret;
+
+ ret = platform_get_irq_optional(pdev, 0);
+ if (ret < 0 && ret != -ENXIO)
+ return ret;
+ if (ret > 0)
+ host->irq = ret;
+ else {
+ dev_warn(dev, "Failed to get IRQ, using polling\n");
+ goto use_polling;
+ }
+
+ host->sdirq = devm_platform_ioremap_resource_byname(pdev, "irq");
+ if (IS_ERR(host->sdirq))
+ return PTR_ERR(host->sdirq);
+
+ ret = devm_request_irq(dev, host->irq, litex_mmc_interrupt, 0,
+ "litex-mmc", host->mmc);
+ if (ret < 0) {
+ dev_warn(dev, "IRQ request error %d, using polling\n", ret);
+ goto use_polling;
+ }
+
+ /* Clear & enable card-change interrupts */
+ litex_write32(host->sdirq + LITEX_IRQ_PENDING, SDIRQ_CARD_DETECT);
+ litex_write32(host->sdirq + LITEX_IRQ_ENABLE, SDIRQ_CARD_DETECT);
+
+ return 0;
+
+use_polling:
+ host->mmc->caps |= MMC_CAP_NEEDS_POLL;
+ host->irq = 0;
+ return 0;
+}
+
+static int litex_mmc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct litex_mmc_host *host;
+ struct mmc_host *mmc;
+ struct clk *clk;
+ int ret;
+
+ /*
+ * NOTE: defaults to max_[req,seg]_size=PAGE_SIZE, max_blk_size=512,
+ * and max_blk_count accordingly set to 8;
+ * If for some reason we need to modify max_blk_count, we must also
+ * re-calculate `max_[req,seg]_size = max_blk_size * max_blk_count;`
+ */
+ mmc = devm_mmc_alloc_host(dev, sizeof(*host));
+ if (!mmc)
+ return -ENOMEM;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+
+ /* Initialize clock source */
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "can't get clock\n");
+ host->ref_clk = clk_get_rate(clk);
+ host->sd_clk = 0;
+
+ /*
+ * LiteSDCard only supports 4-bit bus width; therefore, we MUST inject
+ * a SET_BUS_WIDTH (acmd6) before the very first data transfer, earlier
+ * than when the mmc subsystem would normally get around to it!
+ */
+ host->is_bus_width_set = false;
+ host->app_cmd = false;
+
+ /* LiteSDCard can support 64-bit DMA addressing */
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+ host->buf_size = mmc->max_req_size * 2;
+ host->buffer = dmam_alloc_coherent(dev, host->buf_size,
+ &host->dma, GFP_KERNEL);
+ if (host->buffer == NULL)
+ return -ENOMEM;
+
+ host->sdphy = devm_platform_ioremap_resource_byname(pdev, "phy");
+ if (IS_ERR(host->sdphy))
+ return PTR_ERR(host->sdphy);
+
+ host->sdcore = devm_platform_ioremap_resource_byname(pdev, "core");
+ if (IS_ERR(host->sdcore))
+ return PTR_ERR(host->sdcore);
+
+ host->sdreader = devm_platform_ioremap_resource_byname(pdev, "reader");
+ if (IS_ERR(host->sdreader))
+ return PTR_ERR(host->sdreader);
+
+ host->sdwriter = devm_platform_ioremap_resource_byname(pdev, "writer");
+ if (IS_ERR(host->sdwriter))
+ return PTR_ERR(host->sdwriter);
+
+ /* Ensure DMA bus masters are disabled */
+ litex_write8(host->sdreader + LITEX_BLK2MEM_ENA, 0);
+ litex_write8(host->sdwriter + LITEX_MEM2BLK_ENA, 0);
+
+ init_completion(&host->cmd_done);
+ ret = litex_mmc_irq_init(pdev, host);
+ if (ret)
+ return ret;
+
+ mmc->ops = &litex_mmc_ops;
+
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret || mmc->ocr_avail == 0) {
+ dev_warn(dev, "can't get voltage, defaulting to 3.3V\n");
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ }
+
+ /*
+ * Set default sd_clk frequency range based on empirical observations
+ * of LiteSDCard gateware behavior on typical SDCard media
+ */
+ mmc->f_min = 12.5e6;
+ mmc->f_max = 50e6;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ return ret;
+
+ /* Force 4-bit bus_width (only width supported by hardware) */
+ mmc->caps &= ~MMC_CAP_8_BIT_DATA;
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+ /* Set default capabilities */
+ mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
+ MMC_CAP_DRIVER_TYPE_D |
+ MMC_CAP_CMD23;
+ mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT |
+ MMC_CAP2_NO_SDIO |
+ MMC_CAP2_NO_MMC;
+
+ platform_set_drvdata(pdev, host);
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "LiteX MMC controller initialized.\n");
+ return 0;
+}
+
+static void litex_mmc_remove(struct platform_device *pdev)
+{
+ struct litex_mmc_host *host = platform_get_drvdata(pdev);
+
+ mmc_remove_host(host->mmc);
+}
+
+static const struct of_device_id litex_match[] = {
+ { .compatible = "litex,mmc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, litex_match);
+
+static struct platform_driver litex_mmc_driver = {
+ .probe = litex_mmc_probe,
+ .remove = litex_mmc_remove,
+ .driver = {
+ .name = "litex-mmc",
+ .of_match_table = litex_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_platform_driver(litex_mmc_driver);
+
+MODULE_DESCRIPTION("LiteX SDCard driver");
+MODULE_AUTHOR("Antmicro <contact@antmicro.com>");
+MODULE_AUTHOR("Kamil Rakoczy <krakoczy@antmicro.com>");
+MODULE_AUTHOR("Maciej Dudek <mdudek@internships.antmicro.com>");
+MODULE_AUTHOR("Paul Mackerras <paulus@ozlabs.org>");
+MODULE_AUTHOR("Gabriel Somlo <gsomlo@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/loongson2-mmc.c b/drivers/mmc/host/loongson2-mmc.c
new file mode 100644
index 000000000000..da3daab5f3d6
--- /dev/null
+++ b/drivers/mmc/host/loongson2-mmc.c
@@ -0,0 +1,1030 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Loongson-2K MMC/SDIO controller driver
+ *
+ * Copyright (C) 2018-2025 Loongson Technology Corporation Limited.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitrev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define LOONGSON2_MMC_REG_CTL 0x00 /* Control Register */
+#define LOONGSON2_MMC_REG_PRE 0x04 /* Prescaler Register */
+#define LOONGSON2_MMC_REG_CARG 0x08 /* Command Register */
+#define LOONGSON2_MMC_REG_CCTL 0x0c /* Command Control Register */
+#define LOONGSON2_MMC_REG_CSTS 0x10 /* Command Status Register */
+#define LOONGSON2_MMC_REG_RSP0 0x14 /* Command Response Register 0 */
+#define LOONGSON2_MMC_REG_RSP1 0x18 /* Command Response Register 1 */
+#define LOONGSON2_MMC_REG_RSP2 0x1c /* Command Response Register 2 */
+#define LOONGSON2_MMC_REG_RSP3 0x20 /* Command Response Register 3 */
+#define LOONGSON2_MMC_REG_TIMER 0x24 /* Data Timeout Register */
+#define LOONGSON2_MMC_REG_BSIZE 0x28 /* Block Size Register */
+#define LOONGSON2_MMC_REG_DCTL 0x2c /* Data Control Register */
+#define LOONGSON2_MMC_REG_DCNT 0x30 /* Data Counter Register */
+#define LOONGSON2_MMC_REG_DSTS 0x34 /* Data Status Register */
+#define LOONGSON2_MMC_REG_FSTS 0x38 /* FIFO Status Register */
+#define LOONGSON2_MMC_REG_INT 0x3c /* Interrupt Register */
+#define LOONGSON2_MMC_REG_DATA 0x40 /* Data Register */
+#define LOONGSON2_MMC_REG_IEN 0x64 /* Interrupt Enable Register */
+
+/* EMMC DLL Mode Registers */
+#define LOONGSON2_MMC_REG_DLLVAL 0xf0 /* DLL Master Lock-value Register */
+#define LOONGSON2_MMC_REG_DLLCTL 0xf4 /* DLL Control Register */
+#define LOONGSON2_MMC_REG_DELAY 0xf8 /* DLL Delayed Parameter Register */
+#define LOONGSON2_MMC_REG_SEL 0xfc /* Bus Mode Selection Register */
+
+/* Exclusive DMA R/W Registers */
+#define LOONGSON2_MMC_REG_WDMA_LO 0x400
+#define LOONGSON2_MMC_REG_WDMA_HI 0x404
+#define LOONGSON2_MMC_REG_RDMA_LO 0x800
+#define LOONGSON2_MMC_REG_RDMA_HI 0x804
+
+/* Bitfields of control register */
+#define LOONGSON2_MMC_CTL_ENCLK BIT(0)
+#define LOONGSON2_MMC_CTL_EXTCLK BIT(1)
+#define LOONGSON2_MMC_CTL_RESET BIT(8)
+
+/* Bitfields of prescaler register */
+#define LOONGSON2_MMC_PRE GENMASK(9, 0)
+#define LOONGSON2_MMC_PRE_EN BIT(31)
+
+/* Bitfields of command control register */
+#define LOONGSON2_MMC_CCTL_INDEX GENMASK(5, 0)
+#define LOONGSON2_MMC_CCTL_HOST BIT(6)
+#define LOONGSON2_MMC_CCTL_START BIT(8)
+#define LOONGSON2_MMC_CCTL_WAIT_RSP BIT(9)
+#define LOONGSON2_MMC_CCTL_LONG_RSP BIT(10)
+#define LOONGSON2_MMC_CCTL_ABORT BIT(12)
+#define LOONGSON2_MMC_CCTL_CHECK BIT(13)
+#define LOONGSON2_MMC_CCTL_SDIO BIT(14)
+#define LOONGSON2_MMC_CCTL_CMD6 BIT(18)
+
+/* Bitfields of command status register */
+#define LOONGSON2_MMC_CSTS_INDEX GENMASK(7, 0)
+#define LOONGSON2_MMC_CSTS_ON BIT(8)
+#define LOONGSON2_MMC_CSTS_RSP BIT(9)
+#define LOONGSON2_MMC_CSTS_TIMEOUT BIT(10)
+#define LOONGSON2_MMC_CSTS_END BIT(11)
+#define LOONGSON2_MMC_CSTS_CRC_ERR BIT(12)
+#define LOONGSON2_MMC_CSTS_AUTO_STOP BIT(13)
+#define LOONGSON2_MMC_CSTS_FIN BIT(14)
+
+/* Bitfields of data timeout register */
+#define LOONGSON2_MMC_DTIMR GENMASK(23, 0)
+
+/* Bitfields of block size register */
+#define LOONGSON2_MMC_BSIZE GENMASK(11, 0)
+
+/* Bitfields of data control register */
+#define LOONGSON2_MMC_DCTL_BNUM GENMASK(11, 0)
+#define LOONGSON2_MMC_DCTL_START BIT(14)
+#define LOONGSON2_MMC_DCTL_ENDMA BIT(15)
+#define LOONGSON2_MMC_DCTL_WIDE BIT(16)
+#define LOONGSON2_MMC_DCTL_RWAIT BIT(17)
+#define LOONGSON2_MMC_DCTL_IO_SUSPEND BIT(18)
+#define LOONGSON2_MMC_DCTL_IO_RESUME BIT(19)
+#define LOONGSON2_MMC_DCTL_RW_RESUME BIT(20)
+#define LOONGSON2_MMC_DCTL_8BIT_BUS BIT(26)
+
+/* Bitfields of data counter register */
+#define LOONGSON2_MMC_DCNT_BNUM GENMASK(11, 0)
+#define LOONGSON2_MMC_DCNT_BYTE GENMASK(23, 12)
+
+/* Bitfields of data status register */
+#define LOONGSON2_MMC_DSTS_RXON BIT(0)
+#define LOONGSON2_MMC_DSTS_TXON BIT(1)
+#define LOONGSON2_MMC_DSTS_SBITERR BIT(2)
+#define LOONGSON2_MMC_DSTS_BUSYFIN BIT(3)
+#define LOONGSON2_MMC_DSTS_XFERFIN BIT(4)
+#define LOONGSON2_MMC_DSTS_DTIMEOUT BIT(5)
+#define LOONGSON2_MMC_DSTS_RXCRC BIT(6)
+#define LOONGSON2_MMC_DSTS_TXCRC BIT(7)
+#define LOONGSON2_MMC_DSTS_IRQ BIT(8)
+#define LOONGSON2_MMC_DSTS_START BIT(13)
+#define LOONGSON2_MMC_DSTS_RESUME BIT(15)
+#define LOONGSON2_MMC_DSTS_SUSPEND BIT(16)
+
+/* Bitfields of FIFO Status Register */
+#define LOONGSON2_MMC_FSTS_TXFULL BIT(11)
+
+/* Bitfields of interrupt register */
+#define LOONGSON2_MMC_INT_DFIN BIT(0)
+#define LOONGSON2_MMC_INT_DTIMEOUT BIT(1)
+#define LOONGSON2_MMC_INT_RXCRC BIT(2)
+#define LOONGSON2_MMC_INT_TXCRC BIT(3)
+#define LOONGSON2_MMC_INT_PROGERR BIT(4)
+#define LOONGSON2_MMC_INT_SDIOIRQ BIT(5)
+#define LOONGSON2_MMC_INT_CSENT BIT(6)
+#define LOONGSON2_MMC_INT_CTIMEOUT BIT(7)
+#define LOONGSON2_MMC_INT_RESPCRC BIT(8)
+#define LOONGSON2_MMC_INT_BUSYEND BIT(9)
+
+/* Bitfields of interrupt enable register */
+#define LOONGSON2_MMC_IEN_DFIN BIT(0)
+#define LOONGSON2_MMC_IEN_DTIMEOUT BIT(1)
+#define LOONGSON2_MMC_IEN_RXCRC BIT(2)
+#define LOONGSON2_MMC_IEN_TXCRC BIT(3)
+#define LOONGSON2_MMC_IEN_PROGERR BIT(4)
+#define LOONGSON2_MMC_IEN_SDIOIRQ BIT(5)
+#define LOONGSON2_MMC_IEN_CSENT BIT(6)
+#define LOONGSON2_MMC_IEN_CTIMEOUT BIT(7)
+#define LOONGSON2_MMC_IEN_RESPCRC BIT(8)
+#define LOONGSON2_MMC_IEN_BUSYEND BIT(9)
+
+#define LOONGSON2_MMC_IEN_ALL GENMASK(9, 0)
+#define LOONGSON2_MMC_INT_CLEAR GENMASK(9, 0)
+
+/* Bitfields of DLL master lock-value register */
+#define LOONGSON2_MMC_DLLVAL_DONE BIT(8)
+
+/* Bitfields of DLL control register */
+#define LOONGSON2_MMC_DLLCTL_TIME GENMASK(7, 0)
+#define LOONGSON2_MMC_DLLCTL_INCRE GENMASK(15, 8)
+#define LOONGSON2_MMC_DLLCTL_START GENMASK(23, 16)
+#define LOONGSON2_MMC_DLLCTL_CLK_MODE BIT(24)
+#define LOONGSON2_MMC_DLLCTL_START_BIT BIT(25)
+#define LOONGSON2_MMC_DLLCTL_TIME_BPASS GENMASK(29, 26)
+
+#define LOONGSON2_MMC_DELAY_PAD GENMASK(7, 0)
+#define LOONGSON2_MMC_DELAY_RD GENMASK(15, 8)
+
+#define LOONGSON2_MMC_SEL_DATA BIT(0) /* 0: SDR, 1: DDR */
+#define LOONGSON2_MMC_SEL_BUS BIT(0) /* 0: EMMC, 1: SDIO */
+
+/* Internal dma controller registers */
+
+/* Bitfields of Global Configuration Register */
+#define LOONGSON2_MMC_DMA_64BIT_EN BIT(0) /* 1: 64 bit support */
+#define LOONGSON2_MMC_DMA_UNCOHERENT_EN BIT(1) /* 0: cache, 1: uncache */
+#define LOONGSON2_MMC_DMA_ASK_VALID BIT(2)
+#define LOONGSON2_MMC_DMA_START BIT(3) /* DMA start operation */
+#define LOONGSON2_MMC_DMA_STOP BIT(4) /* DMA stop operation */
+#define LOONGSON2_MMC_DMA_CONFIG_MASK GENMASK_ULL(4, 0) /* DMA controller config bits mask */
+
+/* Bitfields of ndesc_addr field of HW descriptor */
+#define LOONGSON2_MMC_DMA_DESC_EN BIT(0) /* 1: The next descriptor is valid */
+#define LOONGSON2_MMC_DMA_DESC_ADDR_LOW GENMASK(31, 1)
+
+/* Bitfields of cmd field of HW descriptor */
+#define LOONGSON2_MMC_DMA_INT BIT(1) /* Enable DMA interrupts */
+#define LOONGSON2_MMC_DMA_DATA_DIR BIT(12) /* 1: write to device, 0: read from device */
+
+#define LOONGSON2_MMC_DLLVAL_TIMEOUT_US 4000
+#define LOONGSON2_MMC_TXFULL_TIMEOUT_US 500
+
+/* Loongson-2K1000 SDIO2 DMA routing register */
+#define LS2K1000_SDIO_DMA_MASK GENMASK(17, 15)
+#define LS2K1000_DMA0_CONF 0x0
+#define LS2K1000_DMA1_CONF 0x1
+#define LS2K1000_DMA2_CONF 0x2
+#define LS2K1000_DMA3_CONF 0x3
+#define LS2K1000_DMA4_CONF 0x4
+
+/* Loongson-2K0500 SDIO2 DMA routing register */
+#define LS2K0500_SDIO_DMA_MASK GENMASK(15, 14)
+#define LS2K0500_DMA0_CONF 0x1
+#define LS2K0500_DMA1_CONF 0x2
+#define LS2K0500_DMA2_CONF 0x3
+
+enum loongson2_mmc_state {
+ STATE_NONE,
+ STATE_FINALIZE,
+ STATE_CMDSENT,
+ STATE_RSPFIN,
+ STATE_XFERFINISH,
+ STATE_XFERFINISH_RSPFIN,
+};
+
+struct loongson2_dma_desc {
+ u32 ndesc_addr;
+ u32 mem_addr;
+ u32 apb_addr;
+ u32 len;
+ u32 step_len;
+ u32 step_times;
+ u32 cmd;
+ u32 stats;
+ u32 high_ndesc_addr;
+ u32 high_mem_addr;
+ u32 reserved[2];
+} __packed;
+
+struct loongson2_mmc_host {
+ struct device *dev;
+ struct mmc_request *mrq;
+ struct regmap *regmap;
+ struct resource *res;
+ struct clk *clk;
+ u32 current_clk;
+ void *sg_cpu;
+ dma_addr_t sg_dma;
+ int dma_complete;
+ struct dma_chan *chan;
+ int cmd_is_stop;
+ int bus_width;
+ spinlock_t lock; /* Prevent races with irq handler */
+ enum loongson2_mmc_state state;
+ const struct loongson2_mmc_pdata *pdata;
+};
+
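+/* Per-SoC hooks: regmap layout, command data byte-order fixups and the DMA backend. */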
+struct loongson2_mmc_pdata {
+ const struct regmap_config *regmap_config;
+ void (*reorder_cmd_data)(struct loongson2_mmc_host *host, struct mmc_command *cmd);
+ void (*fix_data_timeout)(struct loongson2_mmc_host *host, struct mmc_command *cmd);
+ int (*setting_dma)(struct loongson2_mmc_host *host, struct platform_device *pdev);
+ int (*prepare_dma)(struct loongson2_mmc_host *host, struct mmc_data *data);
+ void (*release_dma)(struct loongson2_mmc_host *host, struct device *dev);
+};
+
+static void loongson2_mmc_send_command(struct loongson2_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ u32 cctrl;
+
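+ /*
+ * Record which interrupt sequence completes this command: data commands
+ * finish with transfer-done plus response, response-only commands with
+ * the response, and bare commands as soon as they are sent.
+ */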
+ if (cmd->data)
+ host->state = STATE_XFERFINISH_RSPFIN;
+ else if (cmd->flags & MMC_RSP_PRESENT)
+ host->state = STATE_RSPFIN;
+ else
+ host->state = STATE_CMDSENT;
+
+ regmap_write(host->regmap, LOONGSON2_MMC_REG_CARG, cmd->arg);
+
+ cctrl = FIELD_PREP(LOONGSON2_MMC_CCTL_INDEX, cmd->opcode);
+ cctrl |= LOONGSON2_MMC_CCTL_HOST | LOONGSON2_MMC_CCTL_START;
+
+ if (cmd->opcode == SD_SWITCH && cmd->data)
+ cctrl |= LOONGSON2_MMC_CCTL_CMD6;
+
+ if (cmd->flags & MMC_RSP_PRESENT)
+ cctrl |= LOONGSON2_MMC_CCTL_WAIT_RSP;
+
+ if (cmd->flags & MMC_RSP_136)
+ cctrl |= LOONGSON2_MMC_CCTL_LONG_RSP;
+
+ regmap_write(host->regmap, LOONGSON2_MMC_REG_CCTL, cctrl);
+}
+
+static int loongson2_mmc_setup_data(struct loongson2_mmc_host *host,
+ struct mmc_data *data)
+{
+ u32 dctrl;
+
+ if ((data->blksz & 3) != 0)
+ return -EINVAL;
+
+ dctrl = FIELD_PREP(LOONGSON2_MMC_DCTL_BNUM, data->blocks);
+ dctrl |= LOONGSON2_MMC_DCTL_START | LOONGSON2_MMC_DCTL_ENDMA;
+
+ if (host->bus_width == MMC_BUS_WIDTH_4)
+ dctrl |= LOONGSON2_MMC_DCTL_WIDE;
+ else if (host->bus_width == MMC_BUS_WIDTH_8)
+ dctrl |= LOONGSON2_MMC_DCTL_8BIT_BUS;
+
+ regmap_write(host->regmap, LOONGSON2_MMC_REG_DCTL, dctrl);
+ regmap_write(host->regmap, LOONGSON2_MMC_REG_BSIZE, data->blksz);
+ regmap_write(host->regmap, LOONGSON2_MMC_REG_TIMER, U32_MAX);
+
+ return 0;
+}
+
+static int loongson2_mmc_prepare_dma(struct loongson2_mmc_host *host,
+ struct mmc_data *data)
+{
+ int ret;
+
+ if (!data)
+ return 0;
+
+ ret = loongson2_mmc_setup_data(host, data);
+ if (ret)
+ return ret;
+
+ host->dma_complete = 0;
+
+ return host->pdata->prepare_dma(host, data);
+}
+
+static void loongson2_mmc_send_request(struct mmc_host *mmc)
+{
+ int ret;
+ struct loongson2_mmc_host *host = mmc_priv(mmc);
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
+
+ ret = loongson2_mmc_prepare_dma(host, cmd->data);
+ if (ret) {
+ dev_err(host->dev, "Failed to prepare DMA data: %d\n", ret);
+ cmd->error = ret;
+ cmd->data->error = ret;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ if (host->pdata->fix_data_timeout)
+ host->pdata->fix_data_timeout(host, cmd);
+
+ loongson2_mmc_send_command(host, cmd);
+
+ /* Deselecting the card (CMD7 with a zero argument) gets no response, so complete it immediately. */
+ if (cmd->opcode == MMC_SELECT_CARD && cmd->arg == 0) {
+ cmd->error = 0;
+ mmc_request_done(mmc, mrq);
+ }
+}
+
+static irqreturn_t loongson2_mmc_irq_worker(int irq, void *devid)
+{
+ struct loongson2_mmc_host *host = (struct loongson2_mmc_host *)devid;
+ struct mmc_host *mmc = mmc_from_priv(host);
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
+
+ if (cmd->data)
+ dma_unmap_sg(mmc_dev(mmc), cmd->data->sg, cmd->data->sg_len,
+ mmc_get_dma_dir(cmd->data));
+
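+ /* For an error-free data command, wait for the data-finished interrupt before completing. */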
+ if (cmd->data && !cmd->error &&
+ !cmd->data->error && !host->dma_complete)
+ return IRQ_HANDLED;
+
+ /* Read response from controller. */
+ regmap_read(host->regmap, LOONGSON2_MMC_REG_RSP0, &cmd->resp[0]);
+ regmap_read(host->regmap, LOONGSON2_MMC_REG_RSP1, &cmd->resp[1]);
+ regmap_read(host->regmap, LOONGSON2_MMC_REG_RSP2, &cmd->resp[2]);
+ regmap_read(host->regmap, LOONGSON2_MMC_REG_RSP3, &cmd->resp[3]);
+
+ /* Cleanup controller */
+ regmap_write(host->regmap, LOONGSON2_MMC_REG_CARG, 0);
+ regmap_write(host->regmap, LOONGSON2_MMC_REG_CCTL, 0);
+
+ if (cmd->data && cmd->error)
+ cmd->data->error = cmd->error;
+
+ if (cmd->data && cmd->data->stop && !host->cmd_is_stop) {
+ host->cmd_is_stop = 1;
+ loongson2_mmc_send_request(mmc);
+ return IRQ_HANDLED;
+ }
+
+ /* If we have no data transfer we are finished here */
+ if (!mrq->data)
+ goto request_done;
+
+ /* Calculate the number of bytes transferred if there was no error */
+ if (mrq->data->error == 0) {
+ mrq->data->bytes_xfered =
+ (mrq->data->blocks * mrq->data->blksz);
+ } else {
+ mrq->data->bytes_xfered = 0;
+ }
+
+request_done:
+ host->state = STATE_NONE;
+ host->mrq = NULL;
+ mmc_request_done(mmc, mrq);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t loongson2_mmc_irq(int irq, void *devid)
+{
+ struct loongson2_mmc_host *host = (struct loongson2_mmc_host *)devid;
+ struct mmc_host *mmc = mmc_from_priv(host);
+ struct mmc_command *cmd;
+ unsigned long iflags;
+ u32 dsts, imsk;
+
+ regmap_read(host->regmap, LOONGSON2_MMC_REG_INT, &imsk);
+ regmap_read(host->regmap, LOONGSON2_MMC_REG_DSTS, &dsts);
+
+ if ((dsts & LOONGSON2_MMC_DSTS_IRQ) &&
+ (imsk & LOONGSON2_MMC_INT_SDIOIRQ)) {
+ regmap_update_bits(host->regmap, LOONGSON2_MMC_REG_INT,
+ LOONGSON2_MMC_INT_SDIOIRQ, LOONGSON2_MMC_INT_SDIOIRQ);
+
+ sdio_signal_irq(mmc);
+ return IRQ_HANDLED;
+ }
+
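+ /* Command/data interrupt: classify the event and, on completion or error, defer to the threaded handler. */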
+ spin_lock_irqsave(&host->lock, iflags);
+
+ if (host->state == STATE_NONE || host->state == STATE_FINALIZE || !host->mrq)
+ goto irq_out;
+
+ cmd = host->cmd_is_stop ? host->mrq->stop : host->mrq->cmd;
+ if (!cmd)
+ goto irq_out;
+
+ cmd->error = 0;
+
+ if (imsk & LOONGSON2_MMC_INT_CTIMEOUT) {
+ cmd->error = -ETIMEDOUT;
+ goto close_transfer;
+ }
+
+ if (imsk & LOONGSON2_MMC_INT_CSENT) {
+ if (host->state == STATE_RSPFIN || host->state == STATE_CMDSENT)
+ goto close_transfer;
+
+ if (host->state == STATE_XFERFINISH_RSPFIN)
+ host->state = STATE_XFERFINISH;
+ }
+
+ if (!cmd->data)
+ goto irq_out;
+
+ if (imsk & (LOONGSON2_MMC_INT_RXCRC | LOONGSON2_MMC_INT_TXCRC)) {
+ cmd->data->error = -EILSEQ;
+ goto close_transfer;
+ }
+
+ if (imsk & LOONGSON2_MMC_INT_DTIMEOUT) {
+ cmd->data->error = -ETIMEDOUT;
+ goto close_transfer;
+ }
+
+ if (imsk & LOONGSON2_MMC_INT_DFIN) {
+ if (host->state == STATE_XFERFINISH) {
+ host->dma_complete = 1;
+ goto close_transfer;
+ }
+
+ if (host->state == STATE_XFERFINISH_RSPFIN)
+ host->state = STATE_RSPFIN;
+ }
+
+irq_out:
+ regmap_write(host->regmap, LOONGSON2_MMC_REG_INT, imsk);
+ spin_unlock_irqrestore(&host->lock, iflags);
+ return IRQ_HANDLED;
+
+close_transfer:
+ host->state = STATE_FINALIZE;
+ host->pdata->reorder_cmd_data(host, cmd);
+ regmap_write(host->regmap, LOONGSON2_MMC_REG_INT, imsk);
+ spin_unlock_irqrestore(&host->lock, iflags);
+ return IRQ_WAKE_THREAD;
+}
+
+static void loongson2_mmc_dll_mode_init(struct loongson2_mmc_host *host)
+{
+ u32 val, pad_delay, delay;
+ int ret;
+
+ regmap_update_bits(host->regmap, LOONGSON2_MMC_REG_SEL,
+ LOONGSON2_MMC_SEL_DATA, LOONGSON2_MMC_SEL_DATA);
+
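+ /* Program the DLL and wait for the master lock value before deriving the pad/read delays. */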
+ val = FIELD_PREP(LOONGSON2_MMC_DLLCTL_TIME, 0xc8)
+ | FIELD_PREP(LOONGSON2_MMC_DLLCTL_INCRE, 0x1)
+ | FIELD_PREP(LOONGSON2_MMC_DLLCTL_START, 0x1)
+ | FIELD_PREP(LOONGSON2_MMC_DLLCTL_CLK_MODE, 0x1)
+ | FIELD_PREP(LOONGSON2_MMC_DLLCTL_START_BIT, 0x1)
+ | FIELD_PREP(LOONGSON2_MMC_DLLCTL_TIME_BPASS, 0xf);
+
+ regmap_write(host->regmap, LOONGSON2_MMC_REG_DLLCTL, val);
+
+ ret = regmap_read_poll_timeout(host->regmap, LOONGSON2_MMC_REG_DLLVAL, val,
+ (val & LOONGSON2_MMC_DLLVAL_DONE), 0,
+ LOONGSON2_MMC_DLLVAL_TIMEOUT_US);
+ if (ret < 0)
+ return;
+
+ regmap_read(host->regmap, LOONGSON2_MMC_REG_DLLVAL, &val);
+ pad_delay = FIELD_GET(GENMASK(7, 1), val);
+
+ delay = FIELD_PREP(LOONGSON2_MMC_DELAY_PAD, pad_delay)
+ | FIELD_PREP(LOONGSON2_MMC_DELAY_RD, pad_delay + 1);
+
+ regmap_write(host->regmap, LOONGSON2_MMC_REG_DELAY, delay);
+}
+
+static void loongson2_mmc_set_clk(struct loongson2_mmc_host *host, struct mmc_ios *ios)
+{
+ u32 pre;
+
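+ /* Derive the card clock from the input clock with the 8-bit prescaler. */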
+ /* ios->clock can be zero (e.g. while powering up); avoid dividing by zero. */
+ if (!ios->clock)
+ return;
+
+ pre = DIV_ROUND_UP(host->current_clk, ios->clock);
+ if (pre > 255)
+ pre = 255;
+
+ regmap_write(host->regmap, LOONGSON2_MMC_REG_PRE, pre | LOONGSON2_MMC_PRE_EN);
+
+ regmap_update_bits(host->regmap, LOONGSON2_MMC_REG_CTL,
+ LOONGSON2_MMC_CTL_ENCLK, LOONGSON2_MMC_CTL_ENCLK);
+
+ /* EMMC DLL mode setting */
+ if (ios->timing == MMC_TIMING_UHS_DDR50 || ios->timing == MMC_TIMING_MMC_DDR52)
+ loongson2_mmc_dll_mode_init(host);
+}
+
+static void loongson2_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct loongson2_mmc_host *host = mmc_priv(mmc);
+ int ret;
+
+ if (ios->power_mode == MMC_POWER_UP) {
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+ if (ret) {
+ dev_err(host->dev, "failed to enable vmmc regulator\n");
+ return; /* bail out, vmmc could not be enabled */
+ }
+ }
+ regmap_write(host->regmap, LOONGSON2_MMC_REG_CTL, LOONGSON2_MMC_CTL_RESET);
+ mdelay(10);
+ regmap_write(host->regmap, LOONGSON2_MMC_REG_CTL, LOONGSON2_MMC_CTL_EXTCLK);
+ regmap_write(host->regmap, LOONGSON2_MMC_REG_INT, LOONGSON2_MMC_INT_CLEAR);
+ regmap_write(host->regmap, LOONGSON2_MMC_REG_IEN, LOONGSON2_MMC_IEN_ALL);
+ } else if (ios->power_mode == MMC_POWER_OFF) {
+ regmap_update_bits(host->regmap, LOONGSON2_MMC_REG_CTL,
+ LOONGSON2_MMC_CTL_RESET, LOONGSON2_MMC_CTL_RESET);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ return;
+ }
+
+ loongson2_mmc_set_clk(host, ios);
+
+ host->bus_width = ios->bus_width;
+}
+
+static void loongson2_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct loongson2_mmc_host *host = mmc_priv(mmc);
+
+ host->cmd_is_stop = 0;
+ host->mrq = mrq;
+ loongson2_mmc_send_request(mmc);
+}
+
+static void loongson2_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct loongson2_mmc_host *host = mmc_priv(mmc);
+
+ regmap_update_bits(host->regmap, LOONGSON2_MMC_REG_IEN, LOONGSON2_MMC_IEN_SDIOIRQ,
+ enable ? LOONGSON2_MMC_IEN_SDIOIRQ : 0);
+}
+
+static void loongson2_mmc_ack_sdio_irq(struct mmc_host *mmc)
+{
+ loongson2_mmc_enable_sdio_irq(mmc, 1);
+}
+
+static struct mmc_host_ops loongson2_mmc_ops = {
+ .request = loongson2_mmc_request,
+ .set_ios = loongson2_mmc_set_ios,
+ .get_ro = mmc_gpio_get_ro,
+ .get_cd = mmc_gpio_get_cd,
+ .enable_sdio_irq = loongson2_mmc_enable_sdio_irq,
+ .ack_sdio_irq = loongson2_mmc_ack_sdio_irq,
+};
+
+static const struct regmap_config ls2k0500_mmc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = LOONGSON2_MMC_REG_IEN,
+};
+
+static int loongson2_reorder_cmd_list[] = { SD_APP_SEND_SCR, SD_APP_SEND_NUM_WR_BLKS,
+ SD_APP_SD_STATUS, MMC_SEND_WRITE_PROT, SD_SWITCH };
+
+/*
+ * According to the SD spec, the data returned for ACMD13, ACMD22, ACMD51
+ * and CMD30 uses a different byte order than regular data packets. The
+ * SDIO controller nevertheless transfers it in the regular format, so
+ * restore the byte order the protocol expects (CMD6 status data
+ * additionally needs a bit reversal).
+ */
+static void ls2k0500_mmc_reorder_cmd_data(struct loongson2_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ struct scatterlist *sg;
+ u32 *data;
+ int i, j;
+
+ if (mmc_cmd_type(cmd) != MMC_CMD_ADTC)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(loongson2_reorder_cmd_list); i++)
+ if (cmd->opcode == loongson2_reorder_cmd_list[i])
+ break;
+
+ if (i == ARRAY_SIZE(loongson2_reorder_cmd_list))
+ return;
+
+ for_each_sg(cmd->data->sg, sg, cmd->data->sg_len, i) {
+ data = sg_virt(sg);
+ for (j = 0; j < (sg_dma_len(sg) / 4); j++) {
+ if (cmd->opcode == SD_SWITCH)
+ data[j] = bitrev8x4(data[j]);
+ else
+ data[j] = (__force u32)cpu_to_be32(data[j]);
+ }
+ }
+}
+
+static int loongson2_mmc_prepare_external_dma(struct loongson2_mmc_host *host,
+ struct mmc_data *data)
+{
+ struct mmc_host *mmc = mmc_from_priv(host);
+ struct dma_slave_config dma_conf = { };
+ struct dma_async_tx_descriptor *desc;
+ int ret;
+
+ ret = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ if (!ret)
+ return -ENOMEM;
+
+ dma_conf.src_addr = host->res->start + LOONGSON2_MMC_REG_DATA;
+ dma_conf.dst_addr = host->res->start + LOONGSON2_MMC_REG_DATA;
+ dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_conf.direction = (data->flags & MMC_DATA_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+
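+ /* Configure the shared rx-tx channel and queue the scatterlist transfer. */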
+ dmaengine_slave_config(host->chan, &dma_conf);
+ desc = dmaengine_prep_slave_sg(host->chan, data->sg, data->sg_len,
+ dma_conf.direction,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc)
+ goto unmap_exit;
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(host->chan);
+
+ return 0;
+
+unmap_exit:
+ dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, mmc_get_dma_dir(data));
+ return -ENOMEM;
+}
+
+static void loongson2_mmc_release_external_dma(struct loongson2_mmc_host *host,
+ struct device *dev)
+{
+ dma_release_channel(host->chan);
+}
+
+static int ls2k0500_mmc_set_external_dma(struct loongson2_mmc_host *host,
+ struct platform_device *pdev)
+{
+ int ret, val;
+ void __iomem *regs;
+
+ regs = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
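+ /* Select DMA channel 2 for this controller in the SoC's SDIO DMA routing register. */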
+ val = readl(regs);
+ val |= FIELD_PREP(LS2K0500_SDIO_DMA_MASK, LS2K0500_DMA2_CONF);
+ writel(val, regs);
+
+ host->chan = dma_request_chan(&pdev->dev, "rx-tx");
+ ret = PTR_ERR_OR_ZERO(host->chan);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot get DMA channel.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct loongson2_mmc_pdata ls2k0500_mmc_pdata = {
+ .regmap_config = &ls2k0500_mmc_regmap_config,
+ .reorder_cmd_data = ls2k0500_mmc_reorder_cmd_data,
+ .setting_dma = ls2k0500_mmc_set_external_dma,
+ .prepare_dma = loongson2_mmc_prepare_external_dma,
+ .release_dma = loongson2_mmc_release_external_dma,
+};
+
+static int ls2k1000_mmc_set_external_dma(struct loongson2_mmc_host *host,
+ struct platform_device *pdev)
+{
+ int ret, val;
+ void __iomem *regs;
+
+ regs = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
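+ /* Select DMA channel 1 for this controller in the SoC's SDIO DMA routing register. */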
+ val = readl(regs);
+ val |= FIELD_PREP(LS2K1000_SDIO_DMA_MASK, LS2K1000_DMA1_CONF);
+ writel(val, regs);
+
+ host->chan = dma_request_chan(&pdev->dev, "rx-tx");
+ ret = PTR_ERR_OR_ZERO(host->chan);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot get DMA channel.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct loongson2_mmc_pdata ls2k1000_mmc_pdata = {
+ .regmap_config = &ls2k0500_mmc_regmap_config,
+ .reorder_cmd_data = ls2k0500_mmc_reorder_cmd_data,
+ .setting_dma = ls2k1000_mmc_set_external_dma,
+ .prepare_dma = loongson2_mmc_prepare_external_dma,
+ .release_dma = loongson2_mmc_release_external_dma,
+};
+
+static const struct regmap_config ls2k2000_mmc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = LOONGSON2_MMC_REG_RDMA_HI,
+};
+
+static void ls2k2000_mmc_reorder_cmd_data(struct loongson2_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ struct scatterlist *sg;
+ u32 *data;
+ int i, j;
+
+ if (cmd->opcode != SD_SWITCH || mmc_cmd_type(cmd) != MMC_CMD_ADTC)
+ return;
+
+ for_each_sg(cmd->data->sg, sg, cmd->data->sg_len, i) {
+ data = sg_virt(sg);
+ for (j = 0; j < (sg_dma_len(sg) / 4); j++)
+ data[j] = bitrev8x4(data[j]);
+ }
+}
+
+/*
+ * Work around a controller hardware defect: single and multiple block write
+ * commands must only be sent once the TX FULL flag is set, otherwise a data
+ * timeout interrupt occurs.
+ */
+static void ls2k2000_mmc_fix_data_timeout(struct loongson2_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ int val;
+
+ if (cmd->opcode != MMC_WRITE_BLOCK && cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
+ return;
+
+ regmap_read_poll_timeout(host->regmap, LOONGSON2_MMC_REG_FSTS, val,
+ (val & LOONGSON2_MMC_FSTS_TXFULL), 0,
+ LOONGSON2_MMC_TXFULL_TIMEOUT_US);
+}
+
+static int loongson2_mmc_prepare_internal_dma(struct loongson2_mmc_host *host,
+ struct mmc_data *data)
+{
+ struct loongson2_dma_desc *pdes = (struct loongson2_dma_desc *)host->sg_cpu;
+ struct mmc_host *mmc = mmc_from_priv(host);
+ dma_addr_t next_desc = host->sg_dma;
+ struct scatterlist *sg;
+ int reg_lo, reg_hi;
+ u64 dma_order;
+ int i, ret;
+
+ ret = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ if (!ret)
+ return -ENOMEM;
+
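+ /* Build one hardware descriptor per scatterlist entry and chain them together. */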
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ pdes[i].len = sg_dma_len(sg) / 4;
+ pdes[i].step_len = 0;
+ pdes[i].step_times = 1;
+ pdes[i].mem_addr = lower_32_bits(sg_dma_address(sg));
+ pdes[i].high_mem_addr = upper_32_bits(sg_dma_address(sg));
+ pdes[i].apb_addr = host->res->start + LOONGSON2_MMC_REG_DATA;
+ pdes[i].cmd = LOONGSON2_MMC_DMA_INT;
+
+ if (data->flags & MMC_DATA_READ) {
+ reg_lo = LOONGSON2_MMC_REG_RDMA_LO;
+ reg_hi = LOONGSON2_MMC_REG_RDMA_HI;
+ } else {
+ pdes[i].cmd |= LOONGSON2_MMC_DMA_DATA_DIR;
+ reg_lo = LOONGSON2_MMC_REG_WDMA_LO;
+ reg_hi = LOONGSON2_MMC_REG_WDMA_HI;
+ }
+
+ next_desc += sizeof(struct loongson2_dma_desc);
+ pdes[i].ndesc_addr = lower_32_bits(next_desc) |
+ LOONGSON2_MMC_DMA_DESC_EN;
+ pdes[i].high_ndesc_addr = upper_32_bits(next_desc);
+ }
+
+ /* Terminate the chain: clear the next-descriptor valid bit on the last entry */
+ pdes[i - 1].ndesc_addr &= ~LOONGSON2_MMC_DMA_DESC_EN;
+
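+ /* Point the controller at the head of the descriptor chain and start the transfer. */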
+ dma_order = (host->sg_dma & ~LOONGSON2_MMC_DMA_CONFIG_MASK) |
+ LOONGSON2_MMC_DMA_64BIT_EN |
+ LOONGSON2_MMC_DMA_START;
+
+ regmap_write(host->regmap, reg_hi, upper_32_bits(dma_order));
+ regmap_write(host->regmap, reg_lo, lower_32_bits(dma_order));
+
+ return 0;
+}
+
+static int ls2k2000_mmc_set_internal_dma(struct loongson2_mmc_host *host,
+ struct platform_device *pdev)
+{
+ host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &host->sg_dma, GFP_KERNEL);
+ if (!host->sg_cpu)
+ return -ENOMEM;
+
+ memset(host->sg_cpu, 0, PAGE_SIZE);
+ return 0;
+}
+
+static void loongson2_mmc_release_internal_dma(struct loongson2_mmc_host *host,
+ struct device *dev)
+{
+ dma_free_coherent(dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+}
+
+static struct loongson2_mmc_pdata ls2k2000_mmc_pdata = {
+ .regmap_config = &ls2k2000_mmc_regmap_config,
+ .reorder_cmd_data = ls2k2000_mmc_reorder_cmd_data,
+ .fix_data_timeout = ls2k2000_mmc_fix_data_timeout,
+ .setting_dma = ls2k2000_mmc_set_internal_dma,
+ .prepare_dma = loongson2_mmc_prepare_internal_dma,
+ .release_dma = loongson2_mmc_release_internal_dma,
+};
+
+static int loongson2_mmc_resource_request(struct platform_device *pdev,
+ struct loongson2_mmc_host *host)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ int ret, irq;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &host->res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ host->regmap = devm_regmap_init_mmio(dev, base, host->pdata->regmap_config);
+ if (IS_ERR(host->regmap))
+ return PTR_ERR(host->regmap);
+
+ host->clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
+
+ if (host->clk) {
+ ret = devm_clk_rate_exclusive_get(dev, host->clk);
+ if (ret)
+ return ret;
+
+ host->current_clk = clk_get_rate(host->clk);
+ } else {
+ /* On ACPI systems, the input clock rate comes from the "clock-frequency" property. */
+ device_property_read_u32(dev, "clock-frequency", &host->current_clk);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, loongson2_mmc_irq,
+ loongson2_mmc_irq_worker,
+ IRQF_ONESHOT, "loongson2-mmc", host);
+ if (ret)
+ return ret;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+ return host->pdata->setting_dma(host, pdev);
+}
+
+static int loongson2_mmc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct loongson2_mmc_host *host;
+ struct mmc_host *mmc;
+ int ret;
+
+ mmc = devm_mmc_alloc_host(dev, sizeof(*host));
+ if (!mmc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mmc);
+
+ host = mmc_priv(mmc);
+ host->state = STATE_NONE;
+ spin_lock_init(&host->lock);
+
+ host->pdata = device_get_match_data(dev);
+ if (!host->pdata)
+ return dev_err_probe(dev, -EINVAL, "Failed to get match data\n");
+
+ ret = loongson2_mmc_resource_request(pdev, host);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request resource\n");
+
+ mmc->ops = &loongson2_mmc_ops;
+ mmc->f_min = DIV_ROUND_UP(host->current_clk, 256);
+ mmc->f_max = host->current_clk;
+ mmc->max_blk_count = 4095;
+ mmc->max_blk_size = 4095;
+ mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
+ mmc->max_segs = 1;
+ mmc->max_seg_size = mmc->max_req_size;
+
+ /* Process SDIO IRQs through the sdio_irq_work. */
+ if (mmc->caps & MMC_CAP_SDIO_IRQ)
+ mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret || mmc->ocr_avail == 0) {
+ dev_warn(dev, "Can't get voltage, defaulting to 3.3V\n");
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ }
+
+ ret = mmc_of_parse(mmc);
+ if (ret) {
+ dev_err(dev, "Failed to parse device node\n");
+ goto free_dma;
+ }
+
+ ret = mmc_add_host(mmc);
+ if (ret) {
+ dev_err(dev, "Failed to add mmc host\n");
+ goto free_dma;
+ }
+
+ return 0;
+
+free_dma:
+ host->pdata->release_dma(host, dev);
+ return ret;
+}
+
+static void loongson2_mmc_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct loongson2_mmc_host *host = mmc_priv(mmc);
+
+ mmc_remove_host(mmc);
+ host->pdata->release_dma(host, &pdev->dev);
+}
+
+static const struct of_device_id loongson2_mmc_of_ids[] = {
+ { .compatible = "loongson,ls2k0500-mmc", .data = &ls2k0500_mmc_pdata },
+ { .compatible = "loongson,ls2k1000-mmc", .data = &ls2k1000_mmc_pdata },
+ { .compatible = "loongson,ls2k2000-mmc", .data = &ls2k2000_mmc_pdata },
+ { },
+};
+MODULE_DEVICE_TABLE(of, loongson2_mmc_of_ids);
+
+static int loongson2_mmc_suspend(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct loongson2_mmc_host *host = mmc_priv(mmc);
+
+ clk_disable_unprepare(host->clk);
+
+ return 0;
+}
+
+static int loongson2_mmc_resume(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct loongson2_mmc_host *host = mmc_priv(mmc);
+
+ return clk_prepare_enable(host->clk);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(loongson2_mmc_pm_ops, loongson2_mmc_suspend, loongson2_mmc_resume);
+
+static struct platform_driver loongson2_mmc_driver = {
+ .driver = {
+ .name = "loongson2-mmc",
+ .of_match_table = loongson2_mmc_of_ids,
+ .pm = pm_ptr(&loongson2_mmc_pm_ops),
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = loongson2_mmc_probe,
+ .remove = loongson2_mmc_remove,
+};
+
+module_platform_driver(loongson2_mmc_driver);
+
+MODULE_DESCRIPTION("Loongson-2K SD/SDIO/eMMC Interface driver");
+MODULE_AUTHOR("Loongson Technology Corporation Limited");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 3f28eb4d17fe..694bb443d5f3 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -11,7 +11,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
@@ -41,14 +41,17 @@
#define CLK_V2_TX_DELAY_MASK GENMASK(19, 16)
#define CLK_V2_RX_DELAY_MASK GENMASK(23, 20)
#define CLK_V2_ALWAYS_ON BIT(24)
+#define CLK_V2_IRQ_SDIO_SLEEP BIT(25)
#define CLK_V3_TX_DELAY_MASK GENMASK(21, 16)
#define CLK_V3_RX_DELAY_MASK GENMASK(27, 22)
#define CLK_V3_ALWAYS_ON BIT(28)
+#define CLK_V3_IRQ_SDIO_SLEEP BIT(29)
#define CLK_TX_DELAY_MASK(h) (h->data->tx_delay_mask)
#define CLK_RX_DELAY_MASK(h) (h->data->rx_delay_mask)
#define CLK_ALWAYS_ON(h) (h->data->always_on)
+#define CLK_IRQ_SDIO_SLEEP(h) (h->data->irq_sdio_sleep)
#define SD_EMMC_DELAY 0x4
#define SD_EMMC_ADJUST 0x8
@@ -101,8 +104,7 @@
#define IRQ_RESP_STATUS BIT(14)
#define IRQ_SDIO BIT(15)
#define IRQ_EN_MASK \
- (IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN | IRQ_RESP_STATUS |\
- IRQ_SDIO)
+ (IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN)
#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
@@ -136,6 +138,7 @@ struct meson_mmc_data {
unsigned int rx_delay_mask;
unsigned int always_on;
unsigned int adjust;
+ unsigned int irq_sdio_sleep;
};
struct sd_emmc_desc {
@@ -147,12 +150,11 @@ struct sd_emmc_desc {
struct meson_host {
struct device *dev;
- struct meson_mmc_data *data;
+ const struct meson_mmc_data *data;
struct mmc_host *mmc;
struct mmc_command *cmd;
void __iomem *regs;
- struct clk *core_clk;
struct clk *mux_clk;
struct clk *mmc_clk;
unsigned long req_rate;
@@ -172,7 +174,9 @@ struct meson_host {
int irq;
- bool vqmmc_enabled;
+ bool needs_pre_post_req;
+
+ spinlock_t lock;
};
#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
@@ -429,6 +433,8 @@ static int meson_mmc_clk_init(struct meson_host *host)
clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, CLK_PHASE_180);
clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, CLK_PHASE_0);
clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, CLK_PHASE_0);
+ if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
+ clk_reg |= CLK_IRQ_SDIO_SLEEP(host);
writel(clk_reg, host->regs + SD_EMMC_CLOCK);
/* get the mux parents */
@@ -597,32 +603,18 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
*/
switch (ios->power_mode) {
case MMC_POWER_OFF:
- if (!IS_ERR(mmc->supply.vmmc))
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
-
- if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
- regulator_disable(mmc->supply.vqmmc);
- host->vqmmc_enabled = false;
- }
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ mmc_regulator_disable_vqmmc(mmc);
break;
case MMC_POWER_UP:
- if (!IS_ERR(mmc->supply.vmmc))
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
break;
case MMC_POWER_ON:
- if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
- int ret = regulator_enable(mmc->supply.vqmmc);
-
- if (ret < 0)
- dev_err(host->dev,
- "failed to enable vqmmc regulator\n");
- else
- host->vqmmc_enabled = true;
- }
+ mmc_regulator_enable_vqmmc(mmc);
break;
}
@@ -663,6 +655,8 @@ static void meson_mmc_request_done(struct mmc_host *mmc,
struct meson_host *host = mmc_priv(mmc);
host->cmd = NULL;
+ if (host->needs_pre_post_req)
+ meson_mmc_post_req(mmc, mrq, 0);
mmc_request_done(host->mmc, mrq);
}
@@ -746,7 +740,7 @@ static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
writel(start, host->regs + SD_EMMC_START);
}
-/* local sg copy to buffer version with _to/fromio usage for dram_access_quirk */
+/* local sg copy for dram_access_quirk */
static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
size_t buflen, bool to_buffer)
{
@@ -764,21 +758,27 @@ static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data
sg_miter_start(&miter, sgl, nents, sg_flags);
while ((offset < buflen) && sg_miter_next(&miter)) {
- unsigned int len;
+ unsigned int buf_offset = 0;
+ unsigned int len, left;
+ u32 *buf = miter.addr;
len = min(miter.length, buflen - offset);
+ left = len;
- /* When dram_access_quirk, the bounce buffer is a iomem mapping */
- if (host->dram_access_quirk) {
- if (to_buffer)
- memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
- else
- memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
+ if (to_buffer) {
+ do {
+ writel(*buf++, host->bounce_iomem_buf + offset + buf_offset);
+
+ buf_offset += 4;
+ left -= 4;
+ } while (left);
} else {
- if (to_buffer)
- memcpy(host->bounce_buf + offset, miter.addr, len);
- else
- memcpy(miter.addr, host->bounce_buf + offset, len);
+ do {
+ *buf++ = readl(host->bounce_iomem_buf + offset + buf_offset);
+
+ buf_offset += 4;
+ left -= 4;
+ } while (left);
}
offset += len;
@@ -801,7 +801,6 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */
- cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */
meson_mmc_set_response_bits(cmd, &cmd_cfg);
@@ -830,7 +829,11 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
if (data->flags & MMC_DATA_WRITE) {
cmd_cfg |= CMD_CFG_DATA_WR;
WARN_ON(xfer_bytes > host->bounce_buf_size);
- meson_mmc_copy_buffer(host, data, xfer_bytes, true);
+ if (host->dram_access_quirk)
+ meson_mmc_copy_buffer(host, data, xfer_bytes, true);
+ else
+ sg_copy_to_buffer(data->sg, data->sg_len,
+ host->bounce_buf, xfer_bytes);
dma_wmb();
}
@@ -849,28 +852,56 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
}
+static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data *data)
+{
+ struct scatterlist *sg;
+ int i;
+
+ /* Reject request if any element offset or size is not 32bit aligned */
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
+ !IS_ALIGNED(sg->length, sizeof(u32))) {
+ dev_err(mmc_dev(mmc), "unaligned sg offset %u len %u\n",
+ sg->offset, sg->length);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct meson_host *host = mmc_priv(mmc);
- bool needs_pre_post_req = mrq->data &&
+ host->needs_pre_post_req = mrq->data &&
!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);
- if (needs_pre_post_req) {
+ /*
+ * The memory at the end of the controller used as bounce buffer for
+ * the dram_access_quirk only accepts 32bit read/write access,
+ * check the alignment and length of the data before starting the request.
+ */
+ if (host->dram_access_quirk && mrq->data) {
+ mrq->cmd->error = meson_mmc_validate_dram_access(mmc, mrq->data);
+ if (mrq->cmd->error) {
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+ }
+
+ if (host->needs_pre_post_req) {
meson_mmc_get_transfer_mode(mmc, mrq);
if (!meson_mmc_desc_chain_mode(mrq->data))
- needs_pre_post_req = false;
+ host->needs_pre_post_req = false;
}
- if (needs_pre_post_req)
+ if (host->needs_pre_post_req)
meson_mmc_pre_req(mmc, mrq);
/* Stop execution */
writel(0, host->regs + SD_EMMC_START);
meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);
-
- if (needs_pre_post_req)
- meson_mmc_post_req(mmc, mrq, 0);
}
static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
@@ -887,33 +918,53 @@ static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
}
}
+static void __meson_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct meson_host *host = mmc_priv(mmc);
+ u32 reg_irqen = IRQ_EN_MASK;
+
+ if (enable)
+ reg_irqen |= IRQ_SDIO;
+ writel(reg_irqen, host->regs + SD_EMMC_IRQ_EN);
+}
+
static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
struct meson_host *host = dev_id;
struct mmc_command *cmd;
- struct mmc_data *data;
- u32 irq_en, status, raw_status;
+ u32 status, raw_status, irq_mask = IRQ_EN_MASK;
irqreturn_t ret = IRQ_NONE;
- irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
+ if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
+ irq_mask |= IRQ_SDIO;
raw_status = readl(host->regs + SD_EMMC_STATUS);
- status = raw_status & irq_en;
+ status = raw_status & irq_mask;
if (!status) {
dev_dbg(host->dev,
"Unexpected IRQ! irq_en 0x%08x - status 0x%08x\n",
- irq_en, raw_status);
+ irq_mask, raw_status);
return IRQ_NONE;
}
- if (WARN_ON(!host) || WARN_ON(!host->cmd))
- return IRQ_NONE;
-
/* ack all raised interrupts */
writel(status, host->regs + SD_EMMC_STATUS);
cmd = host->cmd;
- data = cmd->data;
+
+ if (status & IRQ_SDIO) {
+ spin_lock(&host->lock);
+ __meson_mmc_enable_sdio_irq(host->mmc, 0);
+ sdio_signal_irq(host->mmc);
+ spin_unlock(&host->lock);
+ status &= ~IRQ_SDIO;
+ if (!status)
+ return IRQ_HANDLED;
+ }
+
+ if (WARN_ON(!cmd))
+ return IRQ_NONE;
+
cmd->error = 0;
if (status & IRQ_CRC_ERR) {
dev_dbg(host->dev, "CRC Error - status 0x%08x\n", status);
@@ -931,19 +982,13 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
meson_mmc_read_resp(host->mmc, cmd);
- if (status & IRQ_SDIO) {
- dev_dbg(host->dev, "IRQ: SDIO TODO.\n");
- ret = IRQ_HANDLED;
- }
-
if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
+ struct mmc_data *data = cmd->data;
+
if (data && !cmd->error)
data->bytes_xfered = data->blksz * data->blocks;
- if (meson_mmc_bounce_buf_read(data) ||
- meson_mmc_get_next_command(cmd))
- ret = IRQ_WAKE_THREAD;
- else
- ret = IRQ_HANDLED;
+
+ return IRQ_WAKE_THREAD;
}
out:
@@ -955,9 +1000,6 @@ out:
writel(start, host->regs + SD_EMMC_START);
}
- if (ret == IRQ_HANDLED)
- meson_mmc_request_done(host->mmc, cmd->mrq);
-
return ret;
}
@@ -999,7 +1041,11 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
if (meson_mmc_bounce_buf_read(data)) {
xfer_bytes = data->blksz * data->blocks;
WARN_ON(xfer_bytes > host->bounce_buf_size);
- meson_mmc_copy_buffer(host, data, xfer_bytes, false);
+ if (host->dram_access_quirk)
+ meson_mmc_copy_buffer(host, data, xfer_bytes, false);
+ else
+ sg_copy_from_buffer(data->sg, data->sg_len,
+ host->bounce_buf, xfer_bytes);
}
next_cmd = meson_mmc_get_next_command(cmd);
@@ -1011,20 +1057,6 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/*
- * NOTE: we only need this until the GPIO/pinctrl driver can handle
- * interrupts. For now, the MMC core will use this for polling.
- */
-static int meson_mmc_get_cd(struct mmc_host *mmc)
-{
- int status = mmc_gpio_get_cd(mmc);
-
- if (status == -ENOSYS)
- return 1; /* assume present */
-
- return status;
-}
-
static void meson_mmc_cfg_init(struct meson_host *host)
{
u32 cfg = 0;
@@ -1075,15 +1107,32 @@ static int meson_mmc_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
return -EINVAL;
}
+static void meson_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct meson_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ __meson_mmc_enable_sdio_irq(mmc, enable);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void meson_mmc_ack_sdio_irq(struct mmc_host *mmc)
+{
+ meson_mmc_enable_sdio_irq(mmc, 1);
+}
+
static const struct mmc_host_ops meson_mmc_ops = {
.request = meson_mmc_request,
.set_ios = meson_mmc_set_ios,
- .get_cd = meson_mmc_get_cd,
+ .get_cd = mmc_gpio_get_cd,
.pre_req = meson_mmc_pre_req,
.post_req = meson_mmc_post_req,
.execute_tuning = meson_mmc_resampling_tuning,
.card_busy = meson_mmc_card_busy,
.start_signal_voltage_switch = meson_mmc_voltage_switch,
+ .enable_sdio_irq = meson_mmc_enable_sdio_irq,
+ .ack_sdio_irq = meson_mmc_ack_sdio_irq,
};
static int meson_mmc_probe(struct platform_device *pdev)
@@ -1091,9 +1140,10 @@ static int meson_mmc_probe(struct platform_device *pdev)
struct resource *res;
struct meson_host *host;
struct mmc_host *mmc;
- int ret;
+ struct clk *core_clk;
+ int cd_irq, ret;
- mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(struct meson_host));
if (!mmc)
return -ENOMEM;
host = mmc_priv(mmc);
@@ -1106,47 +1156,41 @@ static int meson_mmc_probe(struct platform_device *pdev)
"amlogic,dram-access-quirk");
/* Get regulators and the supported OCR mask */
- host->vqmmc_enabled = false;
ret = mmc_regulator_get_supply(mmc);
if (ret)
- goto free_host;
+ return ret;
ret = mmc_of_parse(mmc);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
- goto free_host;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "error parsing DT\n");
- host->data = (struct meson_mmc_data *)
- of_device_get_match_data(&pdev->dev);
- if (!host->data) {
- ret = -EINVAL;
- goto free_host;
- }
+ mmc->caps |= MMC_CAP_CMD23;
+
+ if (mmc->caps & MMC_CAP_SDIO_IRQ)
+ mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+
+ host->data = of_device_get_match_data(&pdev->dev);
+ if (!host->data)
+ return -EINVAL;
ret = device_reset_optional(&pdev->dev);
if (ret)
return dev_err_probe(&pdev->dev, ret, "device reset failed\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(host->regs)) {
- ret = PTR_ERR(host->regs);
- goto free_host;
- }
+ host->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(host->regs))
+ return PTR_ERR(host->regs);
host->irq = platform_get_irq(pdev, 0);
- if (host->irq <= 0) {
- ret = -EINVAL;
- goto free_host;
- }
+ if (host->irq < 0)
+ return host->irq;
+
+ cd_irq = platform_get_irq_optional(pdev, 1);
+ mmc_gpio_set_cd_irq(mmc, cd_irq);
host->pinctrl = devm_pinctrl_get(&pdev->dev);
- if (IS_ERR(host->pinctrl)) {
- ret = PTR_ERR(host->pinctrl);
- goto free_host;
- }
+ if (IS_ERR(host->pinctrl))
+ return PTR_ERR(host->pinctrl);
host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl,
"clk-gate");
@@ -1156,19 +1200,13 @@ static int meson_mmc_probe(struct platform_device *pdev)
host->pins_clk_gate = NULL;
}
- host->core_clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(host->core_clk)) {
- ret = PTR_ERR(host->core_clk);
- goto free_host;
- }
-
- ret = clk_prepare_enable(host->core_clk);
- if (ret)
- goto free_host;
+ core_clk = devm_clk_get_enabled(&pdev->dev, "core");
+ if (IS_ERR(core_clk))
+ return PTR_ERR(core_clk);
ret = meson_mmc_clk_init(host);
if (ret)
- goto err_core_clk;
+ return ret;
/* set config to sane default */
meson_mmc_cfg_init(host);
@@ -1178,10 +1216,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
/* clear, ack and enable interrupts */
writel(0, host->regs + SD_EMMC_IRQ_EN);
- writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
- host->regs + SD_EMMC_STATUS);
- writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
- host->regs + SD_EMMC_IRQ_EN);
+ writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
+ writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);
ret = request_threaded_irq(host->irq, meson_mmc_irq,
meson_mmc_irq_thread, IRQF_ONESHOT,
@@ -1189,7 +1225,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
if (ret)
goto err_init_clk;
- mmc->caps |= MMC_CAP_CMD23;
+ spin_lock_init(&host->lock);
+
if (host->dram_access_quirk) {
/* Limit segments to 1 due to low available sram memory */
mmc->max_segs = 1;
@@ -1225,8 +1262,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
/* data bounce buffer */
host->bounce_buf_size = mmc->max_req_size;
host->bounce_buf =
- dma_alloc_coherent(host->dev, host->bounce_buf_size,
- &host->bounce_dma_addr, GFP_KERNEL);
+ dmam_alloc_coherent(host->dev, host->bounce_buf_size,
+ &host->bounce_dma_addr, GFP_KERNEL);
if (host->bounce_buf == NULL) {
dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
ret = -ENOMEM;
@@ -1234,35 +1271,29 @@ static int meson_mmc_probe(struct platform_device *pdev)
}
}
- host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
- &host->descs_dma_addr, GFP_KERNEL);
+ host->descs = dmam_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
+ &host->descs_dma_addr, GFP_KERNEL);
if (!host->descs) {
dev_err(host->dev, "Allocating descriptor DMA buffer failed\n");
ret = -ENOMEM;
- goto err_bounce_buf;
+ goto err_free_irq;
}
mmc->ops = &meson_mmc_ops;
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto err_free_irq;
return 0;
-err_bounce_buf:
- if (!host->dram_access_quirk)
- dma_free_coherent(host->dev, host->bounce_buf_size,
- host->bounce_buf, host->bounce_dma_addr);
err_free_irq:
free_irq(host->irq, host);
err_init_clk:
clk_disable_unprepare(host->mmc_clk);
-err_core_clk:
- clk_disable_unprepare(host->core_clk);
-free_host:
- mmc_free_host(mmc);
return ret;
}
-static int meson_mmc_remove(struct platform_device *pdev)
+static void meson_mmc_remove(struct platform_device *pdev)
{
struct meson_host *host = dev_get_drvdata(&pdev->dev);
@@ -1272,18 +1303,7 @@ static int meson_mmc_remove(struct platform_device *pdev)
writel(0, host->regs + SD_EMMC_IRQ_EN);
free_irq(host->irq, host);
- dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
- host->descs, host->descs_dma_addr);
-
- if (!host->dram_access_quirk)
- dma_free_coherent(host->dev, host->bounce_buf_size,
- host->bounce_buf, host->bounce_dma_addr);
-
clk_disable_unprepare(host->mmc_clk);
- clk_disable_unprepare(host->core_clk);
-
- mmc_free_host(host->mmc);
- return 0;
}
static const struct meson_mmc_data meson_gx_data = {
@@ -1291,6 +1311,7 @@ static const struct meson_mmc_data meson_gx_data = {
.rx_delay_mask = CLK_V2_RX_DELAY_MASK,
.always_on = CLK_V2_ALWAYS_ON,
.adjust = SD_EMMC_ADJUST,
+ .irq_sdio_sleep = CLK_V2_IRQ_SDIO_SLEEP,
};
static const struct meson_mmc_data meson_axg_data = {
@@ -1298,6 +1319,7 @@ static const struct meson_mmc_data meson_axg_data = {
.rx_delay_mask = CLK_V3_RX_DELAY_MASK,
.always_on = CLK_V3_ALWAYS_ON,
.adjust = SD_EMMC_V3_ADJUST,
+ .irq_sdio_sleep = CLK_V3_IRQ_SDIO_SLEEP,
};
static const struct of_device_id meson_mmc_of_match[] = {
diff --git a/drivers/mmc/host/meson-mx-sdhc-clkc.c b/drivers/mmc/host/meson-mx-sdhc-clkc.c
index e1f29b279123..6d619bd0a8dc 100644
--- a/drivers/mmc/host/meson-mx-sdhc-clkc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-clkc.c
@@ -12,8 +12,6 @@
#include "meson-mx-sdhc.h"
-#define MESON_SDHC_NUM_BUILTIN_CLKS 6
-
struct meson_mx_sdhc_clkc {
struct clk_mux src_sel;
struct clk_divider div;
@@ -73,12 +71,21 @@ static int meson_mx_sdhc_clk_hw_register(struct device *dev,
static int meson_mx_sdhc_gate_clk_hw_register(struct device *dev,
const char *name_suffix,
struct clk_hw *parent,
- struct clk_hw *hw)
+ struct clk_hw *hw,
+ struct clk_bulk_data *clk_bulk_data,
+ u8 bulk_index)
{
struct clk_parent_data parent_data = { .hw = parent };
+ int ret;
+
+ ret = meson_mx_sdhc_clk_hw_register(dev, name_suffix, &parent_data, 1,
+ &clk_gate_ops, hw);
+ if (ret)
+ return ret;
- return meson_mx_sdhc_clk_hw_register(dev, name_suffix, &parent_data, 1,
- &clk_gate_ops, hw);
+ clk_bulk_data[bulk_index].clk = devm_clk_hw_get_clk(dev, hw, name_suffix);
+
+ return PTR_ERR_OR_ZERO(clk_bulk_data[bulk_index].clk);
}
int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
@@ -117,7 +124,8 @@ int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
clkc_data->mod_clk_en.bit_idx = 15;
ret = meson_mx_sdhc_gate_clk_hw_register(dev, "mod_clk_on",
&clkc_data->div.hw,
- &clkc_data->mod_clk_en.hw);
+ &clkc_data->mod_clk_en.hw,
+ clk_bulk_data, 0);
if (ret)
return ret;
@@ -125,7 +133,8 @@ int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
clkc_data->tx_clk_en.bit_idx = 14;
ret = meson_mx_sdhc_gate_clk_hw_register(dev, "tx_clk_on",
&clkc_data->div.hw,
- &clkc_data->tx_clk_en.hw);
+ &clkc_data->tx_clk_en.hw,
+ clk_bulk_data, 1);
if (ret)
return ret;
@@ -133,7 +142,8 @@ int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
clkc_data->rx_clk_en.bit_idx = 13;
ret = meson_mx_sdhc_gate_clk_hw_register(dev, "rx_clk_on",
&clkc_data->div.hw,
- &clkc_data->rx_clk_en.hw);
+ &clkc_data->rx_clk_en.hw,
+ clk_bulk_data, 2);
if (ret)
return ret;
@@ -141,18 +151,7 @@ int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
clkc_data->sd_clk_en.bit_idx = 12;
ret = meson_mx_sdhc_gate_clk_hw_register(dev, "sd_clk_on",
&clkc_data->div.hw,
- &clkc_data->sd_clk_en.hw);
- if (ret)
- return ret;
-
- /*
- * TODO: Replace clk_hw.clk with devm_clk_hw_get_clk() once that is
- * available.
- */
- clk_bulk_data[0].clk = clkc_data->mod_clk_en.hw.clk;
- clk_bulk_data[1].clk = clkc_data->sd_clk_en.hw.clk;
- clk_bulk_data[2].clk = clkc_data->tx_clk_en.hw.clk;
- clk_bulk_data[3].clk = clkc_data->rx_clk_en.hw.clk;
-
- return 0;
+ &clkc_data->sd_clk_en.hw,
+ clk_bulk_data, 3);
+ return ret;
}
diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c
index 7cd9c0ec2fcf..fb49ea71289e 100644
--- a/drivers/mmc/host/meson-mx-sdhc-mmc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c
@@ -65,10 +65,8 @@ static const struct regmap_config meson_mx_sdhc_regmap_config = {
.max_register = MESON_SDHC_CLK2,
};
-static void meson_mx_sdhc_hw_reset(struct mmc_host *mmc)
+static void meson_mx_sdhc_reset(struct meson_mx_sdhc_host *host)
{
- struct meson_mx_sdhc_host *host = mmc_priv(mmc);
-
regmap_write(host->regmap, MESON_SDHC_SRST, MESON_SDHC_SRST_MAIN_CTRL |
MESON_SDHC_SRST_RXFIFO | MESON_SDHC_SRST_TXFIFO |
MESON_SDHC_SRST_DPHY_RX | MESON_SDHC_SRST_DPHY_TX |
@@ -116,7 +114,7 @@ static void meson_mx_sdhc_wait_cmd_ready(struct mmc_host *mmc)
dev_warn(mmc_dev(mmc),
"Failed to poll for CMD_BUSY while processing CMD%d\n",
host->cmd->opcode);
- meson_mx_sdhc_hw_reset(mmc);
+ meson_mx_sdhc_reset(host);
}
ret = regmap_read_poll_timeout(host->regmap, MESON_SDHC_ESTA, esta,
@@ -127,7 +125,7 @@ static void meson_mx_sdhc_wait_cmd_ready(struct mmc_host *mmc)
dev_warn(mmc_dev(mmc),
"Failed to poll for ESTA[13:11] while processing CMD%d\n",
host->cmd->opcode);
- meson_mx_sdhc_hw_reset(mmc);
+ meson_mx_sdhc_reset(host);
}
}
@@ -135,6 +133,7 @@ static void meson_mx_sdhc_start_cmd(struct mmc_host *mmc,
struct mmc_command *cmd)
{
struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+ bool manual_stop = false;
u32 ictl, send;
int pack_len;
@@ -172,12 +171,27 @@ static void meson_mx_sdhc_start_cmd(struct mmc_host *mmc,
else
/* software flush: */
ictl |= MESON_SDHC_ICTL_DATA_XFER_OK;
+
+ /*
+ * Mimic the logic from the vendor driver where (only)
+ * SD_IO_RW_EXTENDED commands with more than one block set the
+ * MESON_SDHC_MISC_MANUAL_STOP bit. This fixes the firmware
+ * download in the brcmfmac driver for a BCM43362/1 card.
+ * Without this sdio_memcpy_toio() (with a size of 219557
+ * bytes) times out if MESON_SDHC_MISC_MANUAL_STOP is not set.
+ */
+ manual_stop = cmd->data->blocks > 1 &&
+ cmd->opcode == SD_IO_RW_EXTENDED;
} else {
pack_len = 0;
ictl |= MESON_SDHC_ICTL_RESP_OK;
}
+ regmap_update_bits(host->regmap, MESON_SDHC_MISC,
+ MESON_SDHC_MISC_MANUAL_STOP,
+ manual_stop ? MESON_SDHC_MISC_MANUAL_STOP : 0);
+
if (cmd->opcode == MMC_STOP_TRANSMISSION)
send |= MESON_SDHC_SEND_DATA_STOP;
@@ -253,7 +267,7 @@ static int meson_mx_sdhc_enable_clks(struct mmc_host *mmc)
static int meson_mx_sdhc_set_clk(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct meson_mx_sdhc_host *host = mmc_priv(mmc);
- u32 rx_clk_phase;
+ u32 val, rx_clk_phase;
int ret;
meson_mx_sdhc_disable_clks(mmc);
@@ -274,27 +288,11 @@ static int meson_mx_sdhc_set_clk(struct mmc_host *mmc, struct mmc_ios *ios)
mmc->actual_clock = clk_get_rate(host->sd_clk);
/*
- * according to Amlogic the following latching points are
- * selected with empirical values, there is no (known) formula
- * to calculate these.
+ * Phase 90 should work in most cases. For data transmission,
+ * meson_mx_sdhc_execute_tuning() will find an accurate value.
*/
- if (mmc->actual_clock > 100000000) {
- rx_clk_phase = 1;
- } else if (mmc->actual_clock > 45000000) {
- if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
- rx_clk_phase = 15;
- else
- rx_clk_phase = 11;
- } else if (mmc->actual_clock >= 25000000) {
- rx_clk_phase = 15;
- } else if (mmc->actual_clock > 5000000) {
- rx_clk_phase = 23;
- } else if (mmc->actual_clock > 1000000) {
- rx_clk_phase = 55;
- } else {
- rx_clk_phase = 1061;
- }
-
+ regmap_read(host->regmap, MESON_SDHC_CLKC, &val);
+ rx_clk_phase = FIELD_GET(MESON_SDHC_CLKC_CLK_DIV, val) / 4;
regmap_update_bits(host->regmap, MESON_SDHC_CLK2,
MESON_SDHC_CLK2_RX_CLK_PHASE,
FIELD_PREP(MESON_SDHC_CLK2_RX_CLK_PHASE,
@@ -365,14 +363,14 @@ static void meson_mx_sdhc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static int meson_mx_sdhc_map_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmc_data *data = mrq->data;
- int dma_len;
+ unsigned int dma_len;
if (!data)
return 0;
dma_len = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
- if (dma_len <= 0) {
+ if (!dma_len) {
dev_err(mmc_dev(mmc), "dma_map_sg failed\n");
return -ENOMEM;
}
@@ -495,7 +493,6 @@ static int meson_mx_sdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
}
static const struct mmc_host_ops meson_mx_sdhc_ops = {
- .hw_reset = meson_mx_sdhc_hw_reset,
.request = meson_mx_sdhc_request,
.set_ios = meson_mx_sdhc_set_ios,
.card_busy = meson_mx_sdhc_card_busy,
@@ -618,7 +615,7 @@ static irqreturn_t meson_mx_sdhc_irq_thread(int irq, void *irq_data)
}
if (cmd->error == -EIO || cmd->error == -ETIMEDOUT)
- meson_mx_sdhc_hw_reset(host->mmc);
+ meson_mx_sdhc_reset(host);
else if (cmd->data)
/*
* Clear the FIFOs after completing data transfers to prevent
@@ -728,7 +725,7 @@ static void meson_mx_sdhc_init_hw(struct mmc_host *mmc)
{
struct meson_mx_sdhc_host *host = mmc_priv(mmc);
- meson_mx_sdhc_hw_reset(mmc);
+ meson_mx_sdhc_reset(host);
regmap_write(host->regmap, MESON_SDHC_CTRL,
FIELD_PREP(MESON_SDHC_CTRL_RX_PERIOD, 0xf) |
@@ -768,17 +765,10 @@ static int meson_mx_sdhc_probe(struct platform_device *pdev)
void __iomem *base;
int ret, irq;
- mmc = mmc_alloc_host(sizeof(*host), dev);
+ mmc = devm_mmc_alloc_host(dev, sizeof(*host));
if (!mmc)
return -ENOMEM;
- ret = devm_add_action_or_reset(dev, (void(*)(void *))mmc_free_host,
- mmc);
- if (ret) {
- dev_err(dev, "Failed to register mmc_free_host action\n");
- return ret;
- }
-
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -838,6 +828,11 @@ static int meson_mx_sdhc_probe(struct platform_device *pdev)
goto err_disable_pclk;
irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_disable_pclk;
+ }
+
ret = devm_request_threaded_irq(dev, irq, meson_mx_sdhc_irq,
meson_mx_sdhc_irq_thread, IRQF_ONESHOT,
NULL, host);
@@ -855,7 +850,7 @@ err_disable_pclk:
return ret;
}
-static int meson_mx_sdhc_remove(struct platform_device *pdev)
+static void meson_mx_sdhc_remove(struct platform_device *pdev)
{
struct meson_mx_sdhc_host *host = platform_get_drvdata(pdev);
@@ -864,8 +859,6 @@ static int meson_mx_sdhc_remove(struct platform_device *pdev)
meson_mx_sdhc_disable_clks(host->mmc);
clk_disable_unprepare(host->pclk);
-
- return 0;
}
static const struct meson_mx_sdhc_data meson_mx_sdhc_data_meson8 = {
@@ -900,7 +893,7 @@ MODULE_DEVICE_TABLE(of, meson_mx_sdhc_of_match);
static struct platform_driver meson_mx_sdhc_driver = {
.probe = meson_mx_sdhc_probe,
- .remove = meson_mx_sdhc_remove,
+ .remove = meson_mx_sdhc_remove,
.driver = {
.name = "meson-mx-sdhc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index d4a48916bfb6..5921e2cb2180 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -19,6 +19,7 @@
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
+#include <linux/regmap.h>
#include <linux/timer.h>
#include <linux/types.h>
@@ -98,17 +99,16 @@
#define MESON_MX_SDIO_RESPONSE_CRC16_BITS (16 - 1)
#define MESON_MX_SDIO_MAX_SLOTS 3
+struct meson_mx_mmc_host_clkc {
+ struct clk_divider cfg_div;
+ struct clk_fixed_factor fixed_div2;
+};
+
struct meson_mx_mmc_host {
struct device *controller_dev;
- struct clk *parent_clk;
- struct clk *core_clk;
- struct clk_divider cfg_div;
struct clk *cfg_div_clk;
- struct clk_fixed_factor fixed_factor;
- struct clk *fixed_factor_clk;
-
- void __iomem *base;
+ struct regmap *regmap;
int irq;
spinlock_t irq_lock;
@@ -122,22 +122,10 @@ struct meson_mx_mmc_host {
int error;
};
-static void meson_mx_mmc_mask_bits(struct mmc_host *mmc, char reg, u32 mask,
- u32 val)
-{
- struct meson_mx_mmc_host *host = mmc_priv(mmc);
- u32 regval;
-
- regval = readl(host->base + reg);
- regval &= ~mask;
- regval |= (val & mask);
-
- writel(regval, host->base + reg);
-}
-
static void meson_mx_mmc_soft_reset(struct meson_mx_mmc_host *host)
{
- writel(MESON_MX_SDIO_IRQC_SOFT_RESET, host->base + MESON_MX_SDIO_IRQC);
+ regmap_write(host->regmap, MESON_MX_SDIO_IRQC,
+ MESON_MX_SDIO_IRQC_SOFT_RESET);
udelay(2);
}
@@ -158,7 +146,7 @@ static void meson_mx_mmc_start_cmd(struct mmc_host *mmc,
struct meson_mx_mmc_host *host = mmc_priv(mmc);
unsigned int pack_size;
unsigned long irqflags, timeout;
- u32 mult, send = 0, ext = 0;
+ u32 send = 0, ext = 0;
host->cmd = cmd;
@@ -215,25 +203,22 @@ static void meson_mx_mmc_start_cmd(struct mmc_host *mmc,
spin_lock_irqsave(&host->irq_lock, irqflags);
- mult = readl(host->base + MESON_MX_SDIO_MULT);
- mult &= ~MESON_MX_SDIO_MULT_PORT_SEL_MASK;
- mult |= FIELD_PREP(MESON_MX_SDIO_MULT_PORT_SEL_MASK, host->slot_id);
- mult |= BIT(31);
- writel(mult, host->base + MESON_MX_SDIO_MULT);
+ regmap_update_bits(host->regmap, MESON_MX_SDIO_MULT,
+ MESON_MX_SDIO_MULT_PORT_SEL_MASK | BIT(31),
+ FIELD_PREP(MESON_MX_SDIO_MULT_PORT_SEL_MASK,
+ host->slot_id) | BIT(31));
/* enable the CMD done interrupt */
- meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQC,
- MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN,
- MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN);
+ regmap_set_bits(host->regmap, MESON_MX_SDIO_IRQC,
+ MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN);
/* clear pending interrupts */
- meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQS,
- MESON_MX_SDIO_IRQS_CMD_INT,
- MESON_MX_SDIO_IRQS_CMD_INT);
+ regmap_set_bits(host->regmap, MESON_MX_SDIO_IRQS,
+ MESON_MX_SDIO_IRQS_CMD_INT);
- writel(cmd->arg, host->base + MESON_MX_SDIO_ARGU);
- writel(ext, host->base + MESON_MX_SDIO_EXT);
- writel(send, host->base + MESON_MX_SDIO_SEND);
+ regmap_write(host->regmap, MESON_MX_SDIO_ARGU, cmd->arg);
+ regmap_write(host->regmap, MESON_MX_SDIO_EXT, ext);
+ regmap_write(host->regmap, MESON_MX_SDIO_SEND, send);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
@@ -263,14 +248,13 @@ static void meson_mx_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
- meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF,
- MESON_MX_SDIO_CONF_BUS_WIDTH, 0);
+ regmap_clear_bits(host->regmap, MESON_MX_SDIO_CONF,
+ MESON_MX_SDIO_CONF_BUS_WIDTH);
break;
case MMC_BUS_WIDTH_4:
- meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF,
- MESON_MX_SDIO_CONF_BUS_WIDTH,
- MESON_MX_SDIO_CONF_BUS_WIDTH);
+ regmap_set_bits(host->regmap, MESON_MX_SDIO_CONF,
+ MESON_MX_SDIO_CONF_BUS_WIDTH);
break;
case MMC_BUS_WIDTH_8:
@@ -351,8 +335,8 @@ static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->mrq = mrq;
if (mrq->data)
- writel(sg_dma_address(mrq->data->sg),
- host->base + MESON_MX_SDIO_ADDR);
+ regmap_write(host->regmap, MESON_MX_SDIO_ADDR,
+ sg_dma_address(mrq->data->sg));
if (mrq->sbc)
meson_mx_mmc_start_cmd(mmc, mrq->sbc);
@@ -364,24 +348,26 @@ static void meson_mx_mmc_read_response(struct mmc_host *mmc,
struct mmc_command *cmd)
{
struct meson_mx_mmc_host *host = mmc_priv(mmc);
- u32 mult;
- int i, resp[4];
+ unsigned int i, resp[4];
- mult = readl(host->base + MESON_MX_SDIO_MULT);
- mult |= MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX;
- mult &= ~MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK;
- mult |= FIELD_PREP(MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK, 0);
- writel(mult, host->base + MESON_MX_SDIO_MULT);
+ regmap_update_bits(host->regmap, MESON_MX_SDIO_MULT,
+ MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX |
+ MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK,
+ MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX |
+ FIELD_PREP(MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK,
+ 0));
if (cmd->flags & MMC_RSP_136) {
for (i = 0; i <= 3; i++)
- resp[3 - i] = readl(host->base + MESON_MX_SDIO_ARGU);
+ regmap_read(host->regmap, MESON_MX_SDIO_ARGU,
+ &resp[3 - i]);
+
cmd->resp[0] = (resp[0] << 8) | ((resp[1] >> 24) & 0xff);
cmd->resp[1] = (resp[1] << 8) | ((resp[2] >> 24) & 0xff);
cmd->resp[2] = (resp[2] << 8) | ((resp[3] >> 24) & 0xff);
cmd->resp[3] = (resp[3] << 8);
} else if (cmd->flags & MMC_RSP_PRESENT) {
- cmd->resp[0] = readl(host->base + MESON_MX_SDIO_ARGU);
+ regmap_read(host->regmap, MESON_MX_SDIO_ARGU, &cmd->resp[0]);
}
}
@@ -422,8 +408,8 @@ static irqreturn_t meson_mx_mmc_irq(int irq, void *data)
spin_lock(&host->irq_lock);
- irqs = readl(host->base + MESON_MX_SDIO_IRQS);
- send = readl(host->base + MESON_MX_SDIO_SEND);
+ regmap_read(host->regmap, MESON_MX_SDIO_IRQS, &irqs);
+ regmap_read(host->regmap, MESON_MX_SDIO_SEND, &send);
if (irqs & MESON_MX_SDIO_IRQS_CMD_INT)
ret = meson_mx_mmc_process_cmd_irq(host, irqs, send);
@@ -431,7 +417,7 @@ static irqreturn_t meson_mx_mmc_irq(int irq, void *data)
ret = IRQ_HANDLED;
/* finally ACK all pending interrupts */
- writel(irqs, host->base + MESON_MX_SDIO_IRQS);
+ regmap_write(host->regmap, MESON_MX_SDIO_IRQS, irqs);
spin_unlock(&host->irq_lock);
@@ -446,12 +432,11 @@ static irqreturn_t meson_mx_mmc_irq_thread(int irq, void *irq_data)
if (WARN_ON(!cmd))
return IRQ_HANDLED;
- del_timer_sync(&host->cmd_timeout);
+ timer_delete_sync(&host->cmd_timeout);
if (cmd->data) {
dma_unmap_sg(mmc_dev(host->mmc), cmd->data->sg,
- cmd->data->sg_len,
- mmc_get_dma_dir(cmd->data));
+ cmd->data->sg_len, mmc_get_dma_dir(cmd->data));
cmd->data->bytes_xfered = cmd->data->blksz * cmd->data->blocks;
}
@@ -467,16 +452,16 @@ static irqreturn_t meson_mx_mmc_irq_thread(int irq, void *irq_data)
static void meson_mx_mmc_timeout(struct timer_list *t)
{
- struct meson_mx_mmc_host *host = from_timer(host, t, cmd_timeout);
+ struct meson_mx_mmc_host *host = timer_container_of(host, t,
+ cmd_timeout);
unsigned long irqflags;
- u32 irqc;
+ u32 irqs, argu;
spin_lock_irqsave(&host->irq_lock, irqflags);
/* disable the CMD interrupt */
- irqc = readl(host->base + MESON_MX_SDIO_IRQC);
- irqc &= ~MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN;
- writel(irqc, host->base + MESON_MX_SDIO_IRQC);
+ regmap_clear_bits(host->regmap, MESON_MX_SDIO_IRQC,
+ MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
@@ -487,10 +472,12 @@ static void meson_mx_mmc_timeout(struct timer_list *t)
if (!host->cmd)
return;
+ regmap_read(host->regmap, MESON_MX_SDIO_IRQS, &irqs);
+ regmap_read(host->regmap, MESON_MX_SDIO_ARGU, &argu);
+
dev_dbg(mmc_dev(host->mmc),
"Timeout on CMD%u (IRQS = 0x%08x, ARGU = 0x%08x)\n",
- host->cmd->opcode, readl(host->base + MESON_MX_SDIO_IRQS),
- readl(host->base + MESON_MX_SDIO_ARGU));
+ host->cmd->opcode, irqs, argu);
host->cmd->error = -ETIMEDOUT;
@@ -506,23 +493,30 @@ static struct mmc_host_ops meson_mx_mmc_ops = {
static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent)
{
- struct device_node *slot_node;
- struct platform_device *pdev;
+ struct platform_device *pdev = NULL;
+
+ for_each_available_child_of_node_scoped(parent->of_node, slot_node) {
+ if (!of_device_is_compatible(slot_node, "mmc-slot"))
+ continue;
+
+ /*
+ * TODO: the MMC core framework currently does not support
+ * controllers with multiple slots properly. So we only
+ * register the first slot for now.
+ */
+ if (pdev) {
+ dev_warn(parent,
+ "more than one 'mmc-slot' compatible child found - using the first one and ignoring all subsequent ones\n");
+ break;
+ }
- /*
- * TODO: the MMC core framework currently does not support
- * controllers with multiple slots properly. So we only register
- * the first slot for now
- */
- slot_node = of_get_compatible_child(parent->of_node, "mmc-slot");
- if (!slot_node) {
- dev_warn(parent, "no 'mmc-slot' sub-node found\n");
- return ERR_PTR(-ENOENT);
+ pdev = of_platform_device_create(slot_node, NULL, parent);
+ if (!pdev)
+ dev_err(parent,
+ "Failed to create platform device for mmc-slot node '%pOF'\n",
+ slot_node);
}
- pdev = of_platform_device_create(slot_node, NULL, parent);
- of_node_put(slot_node);
-
return pdev;
}
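
The rewritten slot lookup above relies on for_each_available_child_of_node_scoped() to drop the child node reference automatically, so the explicit of_node_put() from the removed code is no longer needed. For comparison, a hedged sketch of the same "first matching child only" loop with manual reference counting (helper name and structure illustrative, not from the driver):

#include <linux/of.h>
#include <linux/of_platform.h>

/*
 * Hedged sketch only: the manual reference counting that the scoped
 * iterator makes unnecessary.
 */
static struct platform_device *example_first_slot(struct device *parent)
{
	struct device_node *child;
	struct platform_device *pdev = NULL;

	for_each_available_child_of_node(parent->of_node, child) {
		if (!of_device_is_compatible(child, "mmc-slot"))
			continue;

		pdev = of_platform_device_create(child, NULL, parent);
		of_node_put(child);	/* drop the reference held by the iterator */
		break;
	}

	return pdev;
}
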
@@ -532,16 +526,14 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
struct device *slot_dev = mmc_dev(mmc);
int ret;
- if (of_property_read_u32(slot_dev->of_node, "reg", &host->slot_id)) {
- dev_err(slot_dev, "missing 'reg' property\n");
- return -EINVAL;
- }
+ if (of_property_read_u32(slot_dev->of_node, "reg", &host->slot_id))
+ return dev_err_probe(slot_dev, -EINVAL,
+ "missing 'reg' property\n");
- if (host->slot_id >= MESON_MX_SDIO_MAX_SLOTS) {
- dev_err(slot_dev, "invalid 'reg' property value %d\n",
- host->slot_id);
- return -EINVAL;
- }
+ if (host->slot_id >= MESON_MX_SDIO_MAX_SLOTS)
+ return dev_err_probe(slot_dev, -EINVAL,
+ "invalid 'reg' property value %d\n",
+ host->slot_id);
/* Get regulators and the supported OCR mask */
ret = mmc_regulator_get_supply(mmc);
@@ -560,8 +552,7 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
/* Get the min and max supported clock rates */
mmc->f_min = clk_round_rate(host->cfg_div_clk, 1);
- mmc->f_max = clk_round_rate(host->cfg_div_clk,
- clk_get_rate(host->parent_clk));
+ mmc->f_max = clk_round_rate(host->cfg_div_clk, ULONG_MAX);
mmc->caps |= MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
mmc->ops = &meson_mx_mmc_ops;
@@ -577,70 +568,89 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
return 0;
}
-static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host)
+static struct clk *meson_mx_mmc_register_clk(struct device *dev,
+ void __iomem *base)
{
- struct clk_init_data init;
- const char *clk_div_parent, *clk_fixed_factor_parent;
-
- clk_fixed_factor_parent = __clk_get_name(host->parent_clk);
- init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
- "%s#fixed_factor",
- dev_name(host->controller_dev));
- if (!init.name)
- return -ENOMEM;
+ const char *fixed_div2_name, *cfg_div_name;
+ struct meson_mx_mmc_host_clkc *host_clkc;
+ struct clk *clk;
+ int ret;
- init.ops = &clk_fixed_factor_ops;
- init.flags = 0;
- init.parent_names = &clk_fixed_factor_parent;
- init.num_parents = 1;
- host->fixed_factor.div = 2;
- host->fixed_factor.mult = 1;
- host->fixed_factor.hw.init = &init;
-
- host->fixed_factor_clk = devm_clk_register(host->controller_dev,
- &host->fixed_factor.hw);
- if (WARN_ON(IS_ERR(host->fixed_factor_clk)))
- return PTR_ERR(host->fixed_factor_clk);
-
- clk_div_parent = __clk_get_name(host->fixed_factor_clk);
- init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
- "%s#div", dev_name(host->controller_dev));
- if (!init.name)
- return -ENOMEM;
+	/*
+	 * Use a dedicated memory allocation for the clock controller to
+	 * prevent a use-after-free, as meson_mx_mmc_host is freed before
+	 * dev (the controller dev, not mmc_host->dev) is freed.
+	 */
+ host_clkc = devm_kzalloc(dev, sizeof(*host_clkc), GFP_KERNEL);
+ if (!host_clkc)
+ return ERR_PTR(-ENOMEM);
+
+ fixed_div2_name = devm_kasprintf(dev, GFP_KERNEL, "%s#fixed_div2",
+ dev_name(dev));
+ if (!fixed_div2_name)
+ return ERR_PTR(-ENOMEM);
+
+ host_clkc->fixed_div2.div = 2;
+ host_clkc->fixed_div2.mult = 1;
+ host_clkc->fixed_div2.hw.init = CLK_HW_INIT_FW_NAME(fixed_div2_name,
+ "clkin",
+ &clk_fixed_factor_ops,
+ 0);
+ ret = devm_clk_hw_register(dev, &host_clkc->fixed_div2.hw);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Failed to register %s clock\n",
+ fixed_div2_name);
+
+ cfg_div_name = devm_kasprintf(dev, GFP_KERNEL, "%s#div", dev_name(dev));
+ if (!cfg_div_name)
+ return ERR_PTR(-ENOMEM);
+
+ host_clkc->cfg_div.reg = base + MESON_MX_SDIO_CONF;
+ host_clkc->cfg_div.shift = MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT;
+ host_clkc->cfg_div.width = MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH;
+ host_clkc->cfg_div.hw.init = CLK_HW_INIT_HW(cfg_div_name,
+ &host_clkc->fixed_div2.hw,
+ &clk_divider_ops,
+ CLK_DIVIDER_ALLOW_ZERO);
+ ret = devm_clk_hw_register(dev, &host_clkc->cfg_div.hw);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Failed to register %s clock\n",
+ cfg_div_name);
- init.ops = &clk_divider_ops;
- init.flags = CLK_SET_RATE_PARENT;
- init.parent_names = &clk_div_parent;
- init.num_parents = 1;
- host->cfg_div.reg = host->base + MESON_MX_SDIO_CONF;
- host->cfg_div.shift = MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT;
- host->cfg_div.width = MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH;
- host->cfg_div.hw.init = &init;
- host->cfg_div.flags = CLK_DIVIDER_ALLOW_ZERO;
-
- host->cfg_div_clk = devm_clk_register(host->controller_dev,
- &host->cfg_div.hw);
- if (WARN_ON(IS_ERR(host->cfg_div_clk)))
- return PTR_ERR(host->cfg_div_clk);
+ clk = devm_clk_hw_get_clk(dev, &host_clkc->cfg_div.hw, "cfg_div_clk");
+ if (IS_ERR(clk))
+ return dev_err_ptr_probe(dev, PTR_ERR(clk),
+ "Failed to get the cfg_div clock\n");
- return 0;
+ return clk;
}
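
The helper above switches from devm_clk_register() on struct clk to the clk_hw API, describing parents with CLK_HW_INIT_FW_NAME() (the "clkin" firmware clock) and CLK_HW_INIT_HW() (the in-driver fixed divider). A stripped-down, hedged sketch of that registration pattern follows; names, register offset and field widths are illustrative only:

#include <linux/clk-provider.h>

/*
 * Hedged sketch only: register a /2 fixed-factor clock fed by the
 * "clkin" firmware clock, then a divider using it as parent. Error
 * reporting is trimmed for brevity.
 */
static int example_register_clks(struct device *dev, void __iomem *base,
				 struct clk_fixed_factor *fix,
				 struct clk_divider *div)
{
	int ret;

	fix->div = 2;
	fix->mult = 1;
	fix->hw.init = CLK_HW_INIT_FW_NAME("example#fixed_div2", "clkin",
					   &clk_fixed_factor_ops, 0);
	ret = devm_clk_hw_register(dev, &fix->hw);
	if (ret)
		return ret;

	div->reg = base;		/* register holding the divider field */
	div->shift = 0;			/* illustrative field position */
	div->width = 8;			/* illustrative field width */
	div->flags = CLK_DIVIDER_ALLOW_ZERO;
	div->hw.init = CLK_HW_INIT_HW("example#div", &fix->hw,
				      &clk_divider_ops, 0);
	return devm_clk_hw_register(dev, &div->hw);
}
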
static int meson_mx_mmc_probe(struct platform_device *pdev)
{
+ const struct regmap_config meson_mx_sdio_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = MESON_MX_SDIO_EXT,
+ };
struct platform_device *slot_pdev;
struct mmc_host *mmc;
struct meson_mx_mmc_host *host;
+ struct clk *core_clk;
+ void __iomem *base;
int ret, irq;
u32 conf;
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
slot_pdev = meson_mx_mmc_slot_pdev(&pdev->dev);
if (!slot_pdev)
return -ENODEV;
- else if (IS_ERR(slot_pdev))
- return PTR_ERR(slot_pdev);
- mmc = mmc_alloc_host(sizeof(*host), &slot_pdev->dev);
+ mmc = devm_mmc_alloc_host(&slot_pdev->dev, sizeof(*host));
if (!mmc) {
ret = -ENOMEM;
goto error_unregister_slot_pdev;
@@ -655,46 +665,48 @@ static int meson_mx_mmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- host->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(host->base)) {
- ret = PTR_ERR(host->base);
- goto error_free_mmc;
+ host->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ &meson_mx_sdio_regmap_config);
+ if (IS_ERR(host->regmap)) {
+ ret = dev_err_probe(host->controller_dev, PTR_ERR(host->regmap),
+ "Failed to initialize regmap\n");
+ goto error_unregister_slot_pdev;
}
irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto error_unregister_slot_pdev;
+ }
+
ret = devm_request_threaded_irq(host->controller_dev, irq,
meson_mx_mmc_irq,
meson_mx_mmc_irq_thread, IRQF_ONESHOT,
NULL, host);
- if (ret)
- goto error_free_mmc;
-
- host->core_clk = devm_clk_get(host->controller_dev, "core");
- if (IS_ERR(host->core_clk)) {
- ret = PTR_ERR(host->core_clk);
- goto error_free_mmc;
+ if (ret) {
+ dev_err_probe(host->controller_dev, ret,
+ "Failed to request IRQ\n");
+ goto error_unregister_slot_pdev;
}
- host->parent_clk = devm_clk_get(host->controller_dev, "clkin");
- if (IS_ERR(host->parent_clk)) {
- ret = PTR_ERR(host->parent_clk);
- goto error_free_mmc;
+ core_clk = devm_clk_get_enabled(host->controller_dev, "core");
+ if (IS_ERR(core_clk)) {
+ ret = dev_err_probe(host->controller_dev, PTR_ERR(core_clk),
+ "Failed to get and enable 'core' clock\n");
+ goto error_unregister_slot_pdev;
}
- ret = meson_mx_mmc_register_clks(host);
- if (ret)
- goto error_free_mmc;
-
- ret = clk_prepare_enable(host->core_clk);
- if (ret) {
- dev_err(host->controller_dev, "Failed to enable core clock\n");
- goto error_free_mmc;
+ host->cfg_div_clk = meson_mx_mmc_register_clk(&pdev->dev, base);
+ if (IS_ERR(host->cfg_div_clk)) {
+ ret = PTR_ERR(host->cfg_div_clk);
+ goto error_unregister_slot_pdev;
}
ret = clk_prepare_enable(host->cfg_div_clk);
if (ret) {
- dev_err(host->controller_dev, "Failed to enable MMC clock\n");
- goto error_disable_core_clk;
+ dev_err_probe(host->controller_dev, ret,
+ "Failed to enable MMC (cfg div) clock\n");
+ goto error_unregister_slot_pdev;
}
conf = 0;
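
The regmap_config declared at the top of probe describes the SDIO block as 32-bit MMIO registers laid out every 4 bytes, with max_register bounding the accessible range at MESON_MX_SDIO_EXT. A minimal, hedged sketch of that idiom (offsets illustrative, not the driver's):

#include <linux/regmap.h>

/*
 * Hedged sketch only: an MMIO regmap where each register is 32 bits
 * wide and registers sit on 4-byte boundaries.
 */
static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,		/* register offsets fit in one byte */
	.val_bits = 32,		/* each register holds 32 bits */
	.reg_stride = 4,	/* registers are 4 bytes apart */
	.max_register = 0x30,	/* illustrative last valid offset */
};

static struct regmap *example_init_regmap(struct device *dev,
					   void __iomem *base)
{
	return devm_regmap_init_mmio(dev, base, &example_regmap_config);
}
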
@@ -702,44 +714,35 @@ static int meson_mx_mmc_probe(struct platform_device *pdev)
conf |= FIELD_PREP(MESON_MX_SDIO_CONF_M_ENDIAN_MASK, 0x3);
conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_NWR_MASK, 0x2);
conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK, 0x2);
- writel(conf, host->base + MESON_MX_SDIO_CONF);
+ regmap_write(host->regmap, MESON_MX_SDIO_CONF, conf);
meson_mx_mmc_soft_reset(host);
ret = meson_mx_mmc_add_host(host);
if (ret)
- goto error_disable_clks;
+ goto error_disable_div_clk;
return 0;
-error_disable_clks:
+error_disable_div_clk:
clk_disable_unprepare(host->cfg_div_clk);
-error_disable_core_clk:
- clk_disable_unprepare(host->core_clk);
-error_free_mmc:
- mmc_free_host(mmc);
error_unregister_slot_pdev:
of_platform_device_destroy(&slot_pdev->dev, NULL);
return ret;
}
-static int meson_mx_mmc_remove(struct platform_device *pdev)
+static void meson_mx_mmc_remove(struct platform_device *pdev)
{
struct meson_mx_mmc_host *host = platform_get_drvdata(pdev);
struct device *slot_dev = mmc_dev(host->mmc);
- del_timer_sync(&host->cmd_timeout);
+ timer_delete_sync(&host->cmd_timeout);
mmc_remove_host(host->mmc);
of_platform_device_destroy(slot_dev, NULL);
clk_disable_unprepare(host->cfg_div_clk);
- clk_disable_unprepare(host->core_clk);
-
- mmc_free_host(host->mmc);
-
- return 0;
}
static const struct of_device_id meson_mx_mmc_of_match[] = {
@@ -751,7 +754,7 @@ MODULE_DEVICE_TABLE(of, meson_mx_mmc_of_match);
static struct platform_driver meson_mx_mmc_driver = {
.probe = meson_mx_mmc_probe,
- .remove = meson_mx_mmc_remove,
+ .remove = meson_mx_mmc_remove,
.driver = {
.name = "meson-mx-sdio",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/mmc_hsq.c b/drivers/mmc/host/mmc_hsq.c
index a5e05ed0fda3..79836705c176 100644
--- a/drivers/mmc/host/mmc_hsq.c
+++ b/drivers/mmc/host/mmc_hsq.c
@@ -13,9 +13,6 @@
#include "mmc_hsq.h"
-#define HSQ_NUM_SLOTS 64
-#define HSQ_INVALID_TAG HSQ_NUM_SLOTS
-
static void mmc_hsq_retry_handler(struct work_struct *work)
{
struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
@@ -24,6 +21,25 @@ static void mmc_hsq_retry_handler(struct work_struct *work)
mmc->ops->request(mmc, hsq->mrq);
}
+static void mmc_hsq_modify_threshold(struct mmc_hsq *hsq)
+{
+ struct mmc_host *mmc = hsq->mmc;
+ struct mmc_request *mrq;
+ unsigned int tag, need_change = 0;
+
+ mmc->hsq_depth = HSQ_NORMAL_DEPTH;
+ for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
+ mrq = hsq->slot[tag].mrq;
+ if (mrq && mrq->data &&
+ (mrq->data->blksz * mrq->data->blocks == 4096) &&
+ (mrq->data->flags & MMC_DATA_WRITE) &&
+ (++need_change == 2)) {
+ mmc->hsq_depth = HSQ_PERFORMANCE_DEPTH;
+ break;
+ }
+ }
+}
+
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
struct mmc_host *mmc = hsq->mmc;
@@ -34,7 +50,7 @@ static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
spin_lock_irqsave(&hsq->lock, flags);
/* Make sure we are not already running a request now */
- if (hsq->mrq) {
+ if (hsq->mrq || hsq->recovery_halt) {
spin_unlock_irqrestore(&hsq->lock, flags);
return;
}
@@ -45,6 +61,8 @@ static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
return;
}
+ mmc_hsq_modify_threshold(hsq);
+
slot = &hsq->slot[hsq->next_tag];
hsq->mrq = slot->mrq;
hsq->qcnt--;
@@ -73,7 +91,6 @@ static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
- struct hsq_slot *slot;
int tag;
/*
@@ -82,29 +99,12 @@ static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
*/
if (!remains) {
hsq->next_tag = HSQ_INVALID_TAG;
+ hsq->tail_tag = HSQ_INVALID_TAG;
return;
}
- /*
- * Increasing the next tag and check if the corresponding request is
- * available, if yes, then we found a candidate request.
- */
- if (++hsq->next_tag != HSQ_INVALID_TAG) {
- slot = &hsq->slot[hsq->next_tag];
- if (slot->mrq)
- return;
- }
-
- /* Othersie we should iterate all slots to find a available tag. */
- for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
- slot = &hsq->slot[tag];
- if (slot->mrq)
- break;
- }
-
- if (tag == HSQ_NUM_SLOTS)
- tag = HSQ_INVALID_TAG;
-
+ tag = hsq->tag_slot[hsq->next_tag];
+ hsq->tag_slot[hsq->next_tag] = HSQ_INVALID_TAG;
hsq->next_tag = tag;
}
@@ -233,8 +233,14 @@ static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
* Set the next tag as current request tag if no available
* next tag.
*/
- if (hsq->next_tag == HSQ_INVALID_TAG)
+ if (hsq->next_tag == HSQ_INVALID_TAG) {
hsq->next_tag = tag;
+ hsq->tail_tag = tag;
+ hsq->tag_slot[hsq->tail_tag] = HSQ_INVALID_TAG;
+ } else {
+ hsq->tag_slot[hsq->tail_tag] = tag;
+ hsq->tail_tag = tag;
+ }
hsq->qcnt++;
@@ -339,8 +345,10 @@ static const struct mmc_cqe_ops mmc_hsq_ops = {
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
+ int i;
hsq->num_slots = HSQ_NUM_SLOTS;
hsq->next_tag = HSQ_INVALID_TAG;
+ hsq->tail_tag = HSQ_INVALID_TAG;
hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
sizeof(struct hsq_slot), GFP_KERNEL);
@@ -350,6 +358,10 @@ int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
hsq->mmc = mmc;
hsq->mmc->cqe_private = hsq;
mmc->cqe_ops = &mmc_hsq_ops;
+ mmc->hsq_depth = HSQ_NORMAL_DEPTH;
+
+ for (i = 0; i < HSQ_NUM_SLOTS; i++)
+ hsq->tag_slot[i] = HSQ_INVALID_TAG;
INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
spin_lock_init(&hsq->lock);
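
The tag_slot[] array added to struct mmc_hsq effectively stores a singly linked FIFO of queued tags: tag_slot[t] holds the tag submitted after t, next_tag is the head, tail_tag the tail, and HSQ_INVALID_TAG marks both an empty queue and the end of the chain. A hedged, standalone sketch of that enqueue/dequeue logic (names illustrative, not the driver's):

/*
 * Hedged sketch only: the tag FIFO implemented by next_tag, tail_tag
 * and tag_slot[].
 */
#define EX_NUM_SLOTS	64
#define EX_INVALID	EX_NUM_SLOTS

struct example_tag_fifo {
	int next_tag;			/* head: tag dispatched next */
	int tail_tag;			/* tail: most recently queued tag */
	int tag_slot[EX_NUM_SLOTS];	/* tag_slot[t] = tag queued after t */
};

static void example_enqueue(struct example_tag_fifo *q, int tag)
{
	if (q->next_tag == EX_INVALID) {
		/* queue was empty: the new tag is both head and tail */
		q->next_tag = tag;
		q->tail_tag = tag;
		q->tag_slot[tag] = EX_INVALID;
	} else {
		/* link the new tag behind the current tail */
		q->tag_slot[q->tail_tag] = tag;
		q->tail_tag = tag;
	}
}

static void example_dequeue(struct example_tag_fifo *q, int remaining)
{
	if (!remaining) {
		q->next_tag = EX_INVALID;
		q->tail_tag = EX_INVALID;
		return;
	}

	/* advance the head to the tag that was queued after it */
	q->next_tag = q->tag_slot[q->next_tag];
}
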
diff --git a/drivers/mmc/host/mmc_hsq.h b/drivers/mmc/host/mmc_hsq.h
index ffdd9cd172c3..dd352a6ac32a 100644
--- a/drivers/mmc/host/mmc_hsq.h
+++ b/drivers/mmc/host/mmc_hsq.h
@@ -2,6 +2,20 @@
#ifndef LINUX_MMC_HSQ_H
#define LINUX_MMC_HSQ_H
+#define HSQ_NUM_SLOTS 64
+#define HSQ_INVALID_TAG HSQ_NUM_SLOTS
+
+/*
+ * For MMC host software queue, we only allow 2 requests in
+ * flight to avoid a long latency.
+ */
+#define HSQ_NORMAL_DEPTH 2
+/*
+ * For 4k random writes, we allow hsq_depth to increase to 5
+ * for better performance.
+ */
+#define HSQ_PERFORMANCE_DEPTH 5
+
struct hsq_slot {
struct mmc_request *mrq;
};
@@ -17,6 +31,8 @@ struct mmc_hsq {
int next_tag;
int num_slots;
int qcnt;
+ int tail_tag;
+ int tag_slot[HSQ_NUM_SLOTS];
bool enabled;
bool waiting_for_idle;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 65c65bb5737f..42936e248c55 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
-#include <linux/dma-mapping.h>
#include <linux/crc7.h>
#include <linux/crc-itu-t.h>
#include <linux/scatterlist.h>
@@ -27,7 +26,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/* NOTES:
@@ -119,19 +118,14 @@ struct mmc_spi_host {
struct spi_transfer status;
struct spi_message readback;
- /* underlying DMA-aware controller, or null */
- struct device *dma_dev;
-
/* buffer used for commands and for message "overhead" */
struct scratch *data;
- dma_addr_t data_dma;
/* Specs say to write ones most of the time, even when the card
* has no need to read its input data; and many cards won't care.
* This is our source of those ones.
*/
void *ones;
- dma_addr_t ones_dma;
};
@@ -147,11 +141,8 @@ static inline int mmc_cs_off(struct mmc_spi_host *host)
return spi_setup(host->spi);
}
-static int
-mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
+static int mmc_spi_readbytes(struct mmc_spi_host *host, unsigned int len)
{
- int status;
-
if (len > sizeof(*host->data)) {
WARN_ON(1);
return -EIO;
@@ -159,19 +150,7 @@ mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
host->status.len = len;
- if (host->dma_dev)
- dma_sync_single_for_device(host->dma_dev,
- host->data_dma, sizeof(*host->data),
- DMA_FROM_DEVICE);
-
- status = spi_sync_locked(host->spi, &host->readback);
-
- if (host->dma_dev)
- dma_sync_single_for_cpu(host->dma_dev,
- host->data_dma, sizeof(*host->data),
- DMA_FROM_DEVICE);
-
- return status;
+ return spi_sync_locked(host->spi, &host->readback);
}
static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
@@ -180,7 +159,7 @@ static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
u8 *cp = host->data->status;
unsigned long start = jiffies;
- while (1) {
+ do {
int status;
unsigned i;
@@ -193,16 +172,9 @@ static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
return cp[i];
}
- if (time_is_before_jiffies(start + timeout))
- break;
-
- /* If we need long timeouts, we may release the CPU.
- * We use jiffies here because we want to have a relation
- * between elapsed time and the blocking of the scheduler.
- */
- if (time_is_before_jiffies(start + 1))
- schedule();
- }
+ /* If we need long timeouts, we may release the CPU */
+ cond_resched();
+ } while (time_is_after_jiffies(start + timeout));
return -ETIMEDOUT;
}
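
The reworked mmc_spi_skip() loop above bounds the poll with a single jiffies comparison and calls cond_resched() on every pass instead of only after the first jiffy has elapsed. A hedged sketch of that general polling shape, with an illustrative ready() callback:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

/*
 * Hedged sketch only: poll until ready() succeeds or `timeout` jiffies
 * elapse, yielding the CPU between attempts.
 */
static int example_poll(int (*ready)(void *ctx), void *ctx,
			unsigned long timeout)
{
	unsigned long start = jiffies;

	do {
		if (ready(ctx))
			return 0;

		/* long waits may need to release the CPU */
		cond_resched();
	} while (time_is_after_jiffies(start + timeout));

	return -ETIMEDOUT;
}
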
@@ -250,10 +222,6 @@ static int mmc_spi_response_get(struct mmc_spi_host *host,
u8 leftover = 0;
unsigned short rotator;
int i;
- char tag[32];
-
- snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s",
- cmd->opcode, maptype(cmd));
/* Except for data block reads, the whole response will already
* be stored in the scratch buffer. It's somewhere after the
@@ -406,8 +374,9 @@ checkstatus:
}
if (value < 0)
- dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n",
- tag, cmd->resp[0], cmd->resp[1]);
+ dev_dbg(&host->spi->dev,
+ " ... CMD%d response SPI_%s: resp %04x %08x\n",
+ cmd->opcode, maptype(cmd), cmd->resp[0], cmd->resp[1]);
/* disable chipselect on errors and some success cases */
if (value >= 0 && cs_on)
@@ -513,23 +482,11 @@ mmc_spi_command_send(struct mmc_spi_host *host,
t = &host->t;
memset(t, 0, sizeof(*t));
t->tx_buf = t->rx_buf = data->status;
- t->tx_dma = t->rx_dma = host->data_dma;
t->len = cp - data->status;
t->cs_change = 1;
spi_message_add_tail(t, &host->m);
- if (host->dma_dev) {
- host->m.is_dma_mapped = 1;
- dma_sync_single_for_device(host->dma_dev,
- host->data_dma, sizeof(*host->data),
- DMA_BIDIRECTIONAL);
- }
status = spi_sync_locked(host->spi, &host->m);
-
- if (host->dma_dev)
- dma_sync_single_for_cpu(host->dma_dev,
- host->data_dma, sizeof(*host->data),
- DMA_BIDIRECTIONAL);
if (status < 0) {
dev_dbg(&host->spi->dev, " ... write returned %d\n", status);
cmd->error = status;
@@ -547,28 +504,19 @@ mmc_spi_command_send(struct mmc_spi_host *host,
* We always provide TX data for data and CRC. The MMC/SD protocol
* requires us to write ones; but Linux defaults to writing zeroes;
* so we explicitly initialize it to all ones on RX paths.
- *
- * We also handle DMA mapping, so the underlying SPI controller does
- * not need to (re)do it for each message.
*/
static void
-mmc_spi_setup_data_message(
- struct mmc_spi_host *host,
- int multiple,
- enum dma_data_direction direction)
+mmc_spi_setup_data_message(struct mmc_spi_host *host, bool multiple, bool write)
{
struct spi_transfer *t;
struct scratch *scratch = host->data;
- dma_addr_t dma = host->data_dma;
spi_message_init(&host->m);
- if (dma)
- host->m.is_dma_mapped = 1;
/* for reads, readblock() skips 0xff bytes before finding
* the token; for writes, this transfer issues that token.
*/
- if (direction == DMA_TO_DEVICE) {
+ if (write) {
t = &host->token;
memset(t, 0, sizeof(*t));
t->len = 1;
@@ -577,8 +525,6 @@ mmc_spi_setup_data_message(
else
scratch->data_token = SPI_TOKEN_SINGLE;
t->tx_buf = &scratch->data_token;
- if (dma)
- t->tx_dma = dma + offsetof(struct scratch, data_token);
spi_message_add_tail(t, &host->m);
}
@@ -588,24 +534,18 @@ mmc_spi_setup_data_message(
t = &host->t;
memset(t, 0, sizeof(*t));
t->tx_buf = host->ones;
- t->tx_dma = host->ones_dma;
/* length and actual buffer info are written later */
spi_message_add_tail(t, &host->m);
t = &host->crc;
memset(t, 0, sizeof(*t));
t->len = 2;
- if (direction == DMA_TO_DEVICE) {
+ if (write) {
/* the actual CRC may get written later */
t->tx_buf = &scratch->crc_val;
- if (dma)
- t->tx_dma = dma + offsetof(struct scratch, crc_val);
} else {
t->tx_buf = host->ones;
- t->tx_dma = host->ones_dma;
t->rx_buf = &scratch->crc_val;
- if (dma)
- t->rx_dma = dma + offsetof(struct scratch, crc_val);
}
spi_message_add_tail(t, &host->m);
@@ -623,15 +563,12 @@ mmc_spi_setup_data_message(
* the next token (next data block, or STOP_TRAN). We can try to
* minimize I/O ops by using a single read to collect end-of-busy.
*/
- if (multiple || direction == DMA_TO_DEVICE) {
+ if (write) {
t = &host->early_status;
memset(t, 0, sizeof(*t));
- t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
+ t->len = sizeof(scratch->status);
t->tx_buf = host->ones;
- t->tx_dma = host->ones_dma;
t->rx_buf = scratch->status;
- if (dma)
- t->rx_dma = dma + offsetof(struct scratch, status);
t->cs_change = 1;
spi_message_add_tail(t, &host->m);
}
@@ -660,23 +597,13 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
if (host->mmc->use_spi_crc)
scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));
- if (host->dma_dev)
- dma_sync_single_for_device(host->dma_dev,
- host->data_dma, sizeof(*scratch),
- DMA_BIDIRECTIONAL);
status = spi_sync_locked(spi, &host->m);
-
if (status != 0) {
dev_dbg(&spi->dev, "write error (%d)\n", status);
return status;
}
- if (host->dma_dev)
- dma_sync_single_for_cpu(host->dma_dev,
- host->data_dma, sizeof(*scratch),
- DMA_BIDIRECTIONAL);
-
/*
* Get the transmission data-response reply. It must follow
* immediately after the data block we transferred. This reply
@@ -725,8 +652,6 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
}
t->tx_buf += t->len;
- if (host->dma_dev)
- t->tx_dma += t->len;
/* Return when not busy. If we didn't collect that status yet,
* we'll need some more I/O.
@@ -790,30 +715,12 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
}
leftover = status << 1;
- if (host->dma_dev) {
- dma_sync_single_for_device(host->dma_dev,
- host->data_dma, sizeof(*scratch),
- DMA_BIDIRECTIONAL);
- dma_sync_single_for_device(host->dma_dev,
- t->rx_dma, t->len,
- DMA_FROM_DEVICE);
- }
-
status = spi_sync_locked(spi, &host->m);
if (status < 0) {
dev_dbg(&spi->dev, "read error %d\n", status);
return status;
}
- if (host->dma_dev) {
- dma_sync_single_for_cpu(host->dma_dev,
- host->data_dma, sizeof(*scratch),
- DMA_BIDIRECTIONAL);
- dma_sync_single_for_cpu(host->dma_dev,
- t->rx_dma, t->len,
- DMA_FROM_DEVICE);
- }
-
if (bitshift) {
/* Walk through the data and the crc and do
* all the magic to get byte-aligned data.
@@ -848,8 +755,6 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
}
t->rx_buf += t->len;
- if (host->dma_dev)
- t->rx_dma += t->len;
return 0;
}
@@ -864,17 +769,16 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
struct mmc_data *data, u32 blk_size)
{
struct spi_device *spi = host->spi;
- struct device *dma_dev = host->dma_dev;
struct spi_transfer *t;
- enum dma_data_direction direction;
struct scatterlist *sg;
unsigned n_sg;
- int multiple = (data->blocks > 1);
+ bool multiple = (data->blocks > 1);
+ bool write = (data->flags & MMC_DATA_WRITE);
+ const char *write_or_read = write ? "write" : "read";
u32 clock_rate;
unsigned long timeout;
- direction = mmc_get_dma_dir(data);
- mmc_spi_setup_data_message(host, multiple, direction);
+ mmc_spi_setup_data_message(host, multiple, write);
t = &host->t;
if (t->speed_hz)
@@ -891,35 +795,12 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
*/
for_each_sg(data->sg, sg, data->sg_len, n_sg) {
int status = 0;
- dma_addr_t dma_addr = 0;
void *kmap_addr;
unsigned length = sg->length;
- enum dma_data_direction dir = direction;
-
- /* set up dma mapping for controller drivers that might
- * use DMA ... though they may fall back to PIO
- */
- if (dma_dev) {
- /* never invalidate whole *shared* pages ... */
- if ((sg->offset != 0 || length != PAGE_SIZE)
- && dir == DMA_FROM_DEVICE)
- dir = DMA_BIDIRECTIONAL;
-
- dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
- PAGE_SIZE, dir);
- if (dma_mapping_error(dma_dev, dma_addr)) {
- data->error = -EFAULT;
- break;
- }
- if (direction == DMA_TO_DEVICE)
- t->tx_dma = dma_addr + sg->offset;
- else
- t->rx_dma = dma_addr + sg->offset;
- }
/* allow pio too; we don't allow highmem */
kmap_addr = kmap(sg_page(sg));
- if (direction == DMA_TO_DEVICE)
+ if (write)
t->tx_buf = kmap_addr + sg->offset;
else
t->rx_buf = kmap_addr + sg->offset;
@@ -928,11 +809,9 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
while (length) {
t->len = min(length, blk_size);
- dev_dbg(&host->spi->dev, " %s block, %d bytes\n",
- (direction == DMA_TO_DEVICE) ? "write" : "read",
- t->len);
+ dev_dbg(&spi->dev, " %s block, %d bytes\n", write_or_read, t->len);
- if (direction == DMA_TO_DEVICE)
+ if (write)
status = mmc_spi_writeblock(host, t, timeout);
else
status = mmc_spi_readblock(host, t, timeout);
@@ -947,17 +826,15 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
}
/* discard mappings */
- if (direction == DMA_FROM_DEVICE)
- flush_kernel_dcache_page(sg_page(sg));
+ if (write)
+ /* nothing to do */;
+ else
+ flush_dcache_page(sg_page(sg));
kunmap(sg_page(sg));
- if (dma_dev)
- dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
if (status < 0) {
data->error = status;
- dev_dbg(&spi->dev, "%s status %d\n",
- (direction == DMA_TO_DEVICE) ? "write" : "read",
- status);
+ dev_dbg(&spi->dev, "%s status %d\n", write_or_read, status);
break;
}
}
@@ -968,7 +845,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
* that can affect the STOP_TRAN logic. Complete (and current)
* MMC specs should sort that out before Linux starts using CMD23.
*/
- if (direction == DMA_TO_DEVICE && multiple) {
+ if (write && multiple) {
struct scratch *scratch = host->data;
int tmp;
const unsigned statlen = sizeof(scratch->status);
@@ -988,21 +865,9 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
scratch->status[0] = SPI_TOKEN_STOP_TRAN;
host->early_status.tx_buf = host->early_status.rx_buf;
- host->early_status.tx_dma = host->early_status.rx_dma;
host->early_status.len = statlen;
- if (host->dma_dev)
- dma_sync_single_for_device(host->dma_dev,
- host->data_dma, sizeof(*scratch),
- DMA_BIDIRECTIONAL);
-
tmp = spi_sync_locked(spi, &host->m);
-
- if (host->dma_dev)
- dma_sync_single_for_cpu(host->dma_dev,
- host->data_dma, sizeof(*scratch),
- DMA_BIDIRECTIONAL);
-
if (tmp < 0) {
if (!data->error)
data->error = tmp;
@@ -1065,7 +930,7 @@ static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
#endif
/* request exclusive bus access */
- spi_bus_lock(host->spi->master);
+ spi_bus_lock(host->spi->controller);
crc_recover:
/* issue command; then optionally data and stop */
@@ -1097,7 +962,7 @@ crc_recover:
}
/* release the bus */
- spi_bus_unlock(host->spi->master);
+ spi_bus_unlock(host->spi->controller);
mmc_request_done(host->mmc, mrq);
}
@@ -1276,52 +1141,6 @@ mmc_spi_detect_irq(int irq, void *mmc)
return IRQ_HANDLED;
}
-#ifdef CONFIG_HAS_DMA
-static int mmc_spi_dma_alloc(struct mmc_spi_host *host)
-{
- struct spi_device *spi = host->spi;
- struct device *dev;
-
- if (!spi->master->dev.parent->dma_mask)
- return 0;
-
- dev = spi->master->dev.parent;
-
- host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, host->ones_dma))
- return -ENOMEM;
-
- host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data),
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, host->data_dma)) {
- dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
- DMA_TO_DEVICE);
- return -ENOMEM;
- }
-
- dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data),
- DMA_BIDIRECTIONAL);
-
- host->dma_dev = dev;
- return 0;
-}
-
-static void mmc_spi_dma_free(struct mmc_spi_host *host)
-{
- if (!host->dma_dev)
- return;
-
- dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
- DMA_TO_DEVICE);
- dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data),
- DMA_BIDIRECTIONAL);
-}
-#else
-static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; }
-static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {}
-#endif
-
static int mmc_spi_probe(struct spi_device *spi)
{
void *ones;
@@ -1333,7 +1152,7 @@ static int mmc_spi_probe(struct spi_device *spi)
/* We rely on full duplex transfers, mostly to reduce
* per-transfer overheads (by making fewer transfers).
*/
- if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
+ if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX)
return -EINVAL;
/* MMC and SD specs only seem to care that sampling is on the
@@ -1366,7 +1185,7 @@ static int mmc_spi_probe(struct spi_device *spi)
goto nomem;
memset(ones, 0xff, MMC_SPI_BLOCKSIZE);
- mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
+ mmc = devm_mmc_alloc_host(&spi->dev, sizeof(*host));
if (!mmc)
goto nomem;
@@ -1386,7 +1205,10 @@ static int mmc_spi_probe(struct spi_device *spi)
* that's the only reason not to use a few MHz for f_min (until
* the upper layer reads the target frequency from the CSD).
*/
- mmc->f_min = 400000;
+ if (spi->controller->min_speed_hz > 400000)
+		dev_warn(&spi->dev, "Controller unable to reduce bus clock to 400 kHz\n");
+
+ mmc->f_min = max(spi->controller->min_speed_hz, 400000);
mmc->f_max = spi->max_speed_hz;
host = mmc_priv(mmc);
@@ -1413,24 +1235,17 @@ static int mmc_spi_probe(struct spi_device *spi)
host->powerup_msecs = 250;
}
- /* preallocate dma buffers */
+ /* Preallocate buffers */
host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
if (!host->data)
goto fail_nobuf1;
- status = mmc_spi_dma_alloc(host);
- if (status)
- goto fail_dma;
-
/* setup message for status/busy readback */
spi_message_init(&host->readback);
- host->readback.is_dma_mapped = (host->dma_dev != NULL);
spi_message_add_tail(&host->status, &host->readback);
host->status.tx_buf = host->ones;
- host->status.tx_dma = host->ones_dma;
host->status.rx_buf = &host->data->status;
- host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
host->status.cs_change = 1;
/* register card detect irq */
@@ -1448,7 +1263,7 @@ static int mmc_spi_probe(struct spi_device *spi)
status = mmc_add_host(mmc);
if (status != 0)
- goto fail_add_host;
+ goto fail_glue_init;
/*
* Index 0 is card detect
@@ -1456,7 +1271,7 @@ static int mmc_spi_probe(struct spi_device *spi)
*/
status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
if (status == -EPROBE_DEFER)
- goto fail_add_host;
+ goto fail_gpiod_request;
if (!status) {
/*
* The platform has a CD GPIO signal that may support
@@ -1471,13 +1286,12 @@ static int mmc_spi_probe(struct spi_device *spi)
/* Index 1 is write protect/read only */
status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
if (status == -EPROBE_DEFER)
- goto fail_add_host;
+ goto fail_gpiod_request;
if (!status)
has_ro = true;
- dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
+ dev_info(&spi->dev, "SD/MMC host %s%s%s%s\n",
dev_name(&mmc->class_dev),
- host->dma_dev ? "" : ", no DMA",
has_ro ? "" : ", no WP",
(host->pdata && host->pdata->setpower)
? "" : ", no poweroff",
@@ -1485,22 +1299,19 @@ static int mmc_spi_probe(struct spi_device *spi)
? ", cd polling" : "");
return 0;
-fail_add_host:
+fail_gpiod_request:
mmc_remove_host(mmc);
fail_glue_init:
- mmc_spi_dma_free(host);
-fail_dma:
kfree(host->data);
fail_nobuf1:
mmc_spi_put_pdata(spi);
- mmc_free_host(mmc);
nomem:
kfree(ones);
return status;
}
-static int mmc_spi_remove(struct spi_device *spi)
+static void mmc_spi_remove(struct spi_device *spi)
{
struct mmc_host *mmc = dev_get_drvdata(&spi->dev);
struct mmc_spi_host *host = mmc_priv(mmc);
@@ -1511,16 +1322,19 @@ static int mmc_spi_remove(struct spi_device *spi)
mmc_remove_host(mmc);
- mmc_spi_dma_free(host);
kfree(host->data);
kfree(host->ones);
spi->max_speed_hz = mmc->f_max;
mmc_spi_put_pdata(spi);
- mmc_free_host(mmc);
- return 0;
}
+static const struct spi_device_id mmc_spi_dev_ids[] = {
+	{ "mmc-spi-slot" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, mmc_spi_dev_ids);
+
static const struct of_device_id mmc_spi_of_match_table[] = {
{ .compatible = "mmc-spi-slot", },
{},
@@ -1532,6 +1346,7 @@ static struct spi_driver mmc_spi_driver = {
.name = "mmc_spi",
.of_match_table = mmc_spi_of_match_table,
},
+ .id_table = mmc_spi_dev_ids,
.probe = mmc_spi_probe,
.remove = mmc_spi_remove,
};
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 984d35055156..e500051bd572 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -37,6 +37,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
#include <linux/gpio/consumer.h>
+#include <linux/workqueue.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -248,6 +249,7 @@ static struct variant_data variant_stm32 = {
.f_max = 48000000,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
+ .dma_flow_controller = true,
.init = mmci_variant_init,
};
@@ -270,6 +272,8 @@ static struct variant_data variant_stm32_sdmmc = {
.datactrl_any_blocksz = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.stm32_idmabsize_mask = GENMASK(12, 5),
+ .stm32_idmabsize_align = BIT(5),
+ .supports_sdio_irq = true,
.busy_timeout = true,
.busy_detect = true,
.busy_detect_flag = MCI_STM32_BUSYD0,
@@ -280,7 +284,7 @@ static struct variant_data variant_stm32_sdmmc = {
static struct variant_data variant_stm32_sdmmcv2 = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
- .f_max = 208000000,
+ .f_max = 267000000,
.stm32_clkdiv = true,
.cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
.cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
@@ -296,6 +300,37 @@ static struct variant_data variant_stm32_sdmmcv2 = {
.datactrl_any_blocksz = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.stm32_idmabsize_mask = GENMASK(16, 5),
+ .stm32_idmabsize_align = BIT(5),
+ .supports_sdio_irq = true,
+ .dma_lli = true,
+ .busy_timeout = true,
+ .busy_detect = true,
+ .busy_detect_flag = MCI_STM32_BUSYD0,
+ .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
+ .init = sdmmc_variant_init,
+};
+
+static struct variant_data variant_stm32_sdmmcv3 = {
+ .fifosize = 256 * 4,
+ .fifohalfsize = 128 * 4,
+ .f_max = 267000000,
+ .stm32_clkdiv = true,
+ .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
+ .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
+ .cmdreg_srsp = MCI_CPSM_STM32_SRSP,
+ .cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
+ .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
+ .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
+ .datactrl_first = true,
+ .datacnt_useless = true,
+ .datalength_bits = 25,
+ .datactrl_blocksz = 14,
+ .datactrl_any_blocksz = true,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
+ .stm32_idmabsize_mask = GENMASK(16, 6),
+ .stm32_idmabsize_align = BIT(6),
+ .supports_sdio_irq = true,
.dma_lli = true,
.busy_timeout = true,
.busy_detect = true,
@@ -389,8 +424,9 @@ void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
*/
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
- /* Keep busy mode in DPSM if enabled */
- datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;
+ /* Keep busy mode in DPSM and SDIO mask if enabled */
+ datactrl |= host->datactrl_reg & (host->variant->busy_dpsm_flag |
+ host->variant->datactrl_mask_sdio);
if (host->datactrl_reg != datactrl) {
host->datactrl_reg = datactrl;
@@ -654,10 +690,52 @@ static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
return MCI_DPSM_ENABLE | (host->data->blksz << 16);
}
-static bool ux500_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
+static void ux500_busy_clear_mask_done(struct mmci_host *host)
{
void __iomem *base = host->base;
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ writel(readl(base + MMCIMASK0) &
+ ~host->variant->busy_detect_mask, base + MMCIMASK0);
+ host->busy_state = MMCI_BUSY_DONE;
+ host->busy_status = 0;
+}
+
+/*
+ * ux500_busy_complete() - this will wait until the busy status
+ * goes off, saving any status that occurs in the meantime into
+ * host->busy_status until we know the card is not busy any more.
+ * The function returns true when the busy detection has ended
+ * and we should continue processing the command.
+ *
+ * The Ux500 typically fires two IRQs over a busy cycle like this:
+ *
+ * DAT0 busy +-----------------+
+ * | |
+ * DAT0 not busy ----+ +--------
+ *
+ * ^ ^
+ * | |
+ * IRQ1 IRQ2
+ */
+static bool ux500_busy_complete(struct mmci_host *host, struct mmc_command *cmd,
+ u32 status, u32 err_msk)
+{
+ void __iomem *base = host->base;
+ int retries = 10;
+
+ if (status & err_msk) {
+ /* Stop any ongoing busy detection if an error occurs */
+ ux500_busy_clear_mask_done(host);
+ goto out_ret_state;
+ }
+
+ /*
+	 * The state transitions follow the edges of the diagram above,
+	 * with one case in this switch statement per state.
+ */
+ switch (host->busy_state) {
+
/*
* Before unmasking for the busy end IRQ, confirm that the
* command was sent successfully. To keep track of having a
@@ -667,19 +745,34 @@ static bool ux500_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
* Note that, the card may need a couple of clock cycles before
* it starts signaling busy on DAT0, hence re-read the
* MMCISTATUS register here, to allow the busy bit to be set.
- * Potentially we may even need to poll the register for a
- * while, to allow it to be set, but tests indicates that it
- * isn't needed.
*/
- if (!host->busy_status && !(status & err_msk) &&
- (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
- writel(readl(base + MMCIMASK0) |
- host->variant->busy_detect_mask,
- base + MMCIMASK0);
-
+ case MMCI_BUSY_DONE:
+ /*
+ * Save the first status register read to be sure to catch
+		 * all bits that may be lost while retrying. If the command
+ * is still busy this will result in assigning 0 to
+ * host->busy_status, which is what it should be in IDLE.
+ */
host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
- return false;
- }
+ while (retries) {
+ status = readl(base + MMCISTATUS);
+ /* Keep accumulating status bits */
+ host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
+ if (status & host->variant->busy_detect_flag) {
+ writel(readl(base + MMCIMASK0) |
+ host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+ host->busy_state = MMCI_BUSY_WAITING_FOR_START_IRQ;
+ schedule_delayed_work(&host->ux500_busy_timeout_work,
+ msecs_to_jiffies(cmd->busy_timeout));
+ goto out_ret_state;
+ }
+ retries--;
+ }
+ dev_dbg(mmc_dev(host->mmc),
+ "no busy signalling in time CMD%02x\n", cmd->opcode);
+ ux500_busy_clear_mask_done(host);
+ break;
/*
* If there is a command in-progress that has been successfully
@@ -692,27 +785,41 @@ static bool ux500_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
* both the start and the end interrupts needs to be cleared,
* one after the other. So, clear the busy start IRQ here.
*/
- if (host->busy_status &&
- (status & host->variant->busy_detect_flag)) {
- writel(host->variant->busy_detect_mask, base + MMCICLEAR);
- return false;
- }
+ case MMCI_BUSY_WAITING_FOR_START_IRQ:
+ if (status & host->variant->busy_detect_flag) {
+ host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ host->busy_state = MMCI_BUSY_WAITING_FOR_END_IRQ;
+ } else {
+ dev_dbg(mmc_dev(host->mmc),
+ "lost busy status when waiting for busy start IRQ CMD%02x\n",
+ cmd->opcode);
+ cancel_delayed_work(&host->ux500_busy_timeout_work);
+ ux500_busy_clear_mask_done(host);
+ }
+ break;
- /*
- * If there is a command in-progress that has been successfully
- * sent and the busy bit isn't set, it means we have received
- * the busy end IRQ. Clear and mask the IRQ, then continue to
- * process the command.
- */
- if (host->busy_status) {
- writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ case MMCI_BUSY_WAITING_FOR_END_IRQ:
+ if (!(status & host->variant->busy_detect_flag)) {
+ host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ cancel_delayed_work(&host->ux500_busy_timeout_work);
+ ux500_busy_clear_mask_done(host);
+ } else {
+ dev_dbg(mmc_dev(host->mmc),
+ "busy status still asserted when handling busy end IRQ - will keep waiting CMD%02x\n",
+ cmd->opcode);
+ }
+ break;
- writel(readl(base + MMCIMASK0) &
- ~host->variant->busy_detect_mask, base + MMCIMASK0);
- host->busy_status = 0;
+ default:
+ dev_dbg(mmc_dev(host->mmc), "fell through on state %d, CMD%02x\n",
+ host->busy_state, cmd->opcode);
+ break;
}
- return true;
+out_ret_state:
+ return (host->busy_state == MMCI_BUSY_DONE);
}
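
Stripped of the register accesses and IRQ masking, the switch above walks a three-state machine: DONE arms the busy timeout work and waits for the start IRQ, the start IRQ hands over to waiting for the end IRQ, and the end IRQ (or a lost busy signal) returns to DONE. A hedged, simplified skeleton of that flow (names illustrative, not the driver's):

/*
 * Hedged sketch only: the three busy-detect states and the edges the
 * real handler crosses; `busy` is the DAT0 busy flag from MMCISTATUS.
 */
enum example_busy_state {
	EX_BUSY_DONE,		/* no busy detection in flight */
	EX_WAIT_START_IRQ,	/* busy seen, waiting for the start IRQ */
	EX_WAIT_END_IRQ,	/* start IRQ seen, waiting for the end IRQ */
};

static bool example_busy_step(enum example_busy_state *state, bool busy)
{
	switch (*state) {
	case EX_BUSY_DONE:
		if (busy)		/* card pulled DAT0 low: start waiting */
			*state = EX_WAIT_START_IRQ;
		break;
	case EX_WAIT_START_IRQ:
		if (busy)		/* IRQ1: busy confirmed */
			*state = EX_WAIT_END_IRQ;
		else			/* busy signal lost: give up */
			*state = EX_BUSY_DONE;
		break;
	case EX_WAIT_END_IRQ:
		if (!busy)		/* IRQ2: card released DAT0 */
			*state = EX_BUSY_DONE;
		break;
	}

	return *state == EX_BUSY_DONE;	/* true: continue command processing */
}
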
/*
@@ -762,7 +869,7 @@ int mmci_dmae_setup(struct mmci_host *host)
/*
* If only an RX channel is specified, the driver will
- * attempt to use it bidirectionally, however if it is
+ * attempt to use it bidirectionally, however if it
* is specified but cannot be located, DMA will be disabled.
*/
if (dmae->rx_channel && !dmae->tx_channel)
@@ -913,7 +1020,7 @@ static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
- .device_fc = false,
+ .device_fc = variant->dma_flow_controller,
};
struct dma_chan *chan;
struct dma_device *device;
@@ -1214,6 +1321,7 @@ static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
void __iomem *base = host->base;
+ bool busy_resp = cmd->flags & MMC_RSP_BUSY;
unsigned long long clks;
dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
@@ -1238,10 +1346,14 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
c |= host->variant->cmdreg_srsp;
}
- if (host->variant->busy_timeout && cmd->flags & MMC_RSP_BUSY) {
- if (!cmd->busy_timeout)
- cmd->busy_timeout = 10 * MSEC_PER_SEC;
+ host->busy_status = 0;
+ host->busy_state = MMCI_BUSY_DONE;
+ /* Assign a default timeout if the core does not provide one */
+ if (busy_resp && !cmd->busy_timeout)
+ cmd->busy_timeout = 10 * MSEC_PER_SEC;
+
+ if (busy_resp && host->variant->busy_timeout) {
if (cmd->busy_timeout > host->mmc->max_busy_timeout)
clks = (unsigned long long)host->mmc->max_busy_timeout * host->cclk;
else
@@ -1382,7 +1494,7 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
/* Handle busy detection on DAT0 if the variant supports it. */
if (busy_resp && host->variant->busy_detect)
- if (!host->ops->busy_complete(host, status, err_msk))
+ if (!host->ops->busy_complete(host, cmd, status, err_msk))
return;
host->cmd = NULL;
@@ -1394,6 +1506,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
} else if (host->variant->busy_timeout && busy_resp &&
status & MCI_DATATIMEOUT) {
cmd->error = -ETIMEDOUT;
+ /*
+ * This will wake up mmci_irq_thread() which will issue
+ * a hardware reset of the MMCI block.
+ */
host->irq_action = IRQ_WAKE_THREAD;
} else {
cmd->resp[0] = readl(base + MMCIRESPONSE0);
@@ -1425,6 +1541,54 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
}
}
+static char *ux500_state_str(struct mmci_host *host)
+{
+ switch (host->busy_state) {
+ case MMCI_BUSY_WAITING_FOR_START_IRQ:
+ return "waiting for start IRQ";
+ case MMCI_BUSY_WAITING_FOR_END_IRQ:
+ return "waiting for end IRQ";
+ case MMCI_BUSY_DONE:
+ return "not waiting for IRQs";
+ default:
+ return "unknown";
+ }
+}
+
+/*
+ * This busy timeout worker is used to "kick" the command IRQ if a
+ * busy detect IRQ fails to appear in reasonable time. Only used on
+ * variants with busy detection IRQ delivery.
+ */
+static void ux500_busy_timeout_work(struct work_struct *work)
+{
+ struct mmci_host *host = container_of(work, struct mmci_host,
+ ux500_busy_timeout_work.work);
+ unsigned long flags;
+ u32 status;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->cmd) {
+ /* If we are still busy let's tag on a cmd-timeout error. */
+ status = readl(host->base + MMCISTATUS);
+ if (status & host->variant->busy_detect_flag) {
+ status |= MCI_CMDTIMEOUT;
+ dev_err(mmc_dev(host->mmc),
+ "timeout in state %s still busy with CMD%02x\n",
+ ux500_state_str(host), host->cmd->opcode);
+ } else {
+ dev_err(mmc_dev(host->mmc),
+ "timeout in state %s waiting for busy CMD%02x\n",
+ ux500_state_str(host), host->cmd->opcode);
+ }
+
+ mmci_cmd_irq(host, host->cmd, status);
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
return remain - (readl(host->base + MMCIFIFOCNT) << 2);
@@ -1602,6 +1766,25 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void mmci_write_sdio_irq_bit(struct mmci_host *host, int enable)
+{
+ void __iomem *base = host->base;
+ u32 mask = readl_relaxed(base + MMCIMASK0);
+
+ if (enable)
+ writel_relaxed(mask | MCI_ST_SDIOITMASK, base + MMCIMASK0);
+ else
+ writel_relaxed(mask & ~MCI_ST_SDIOITMASK, base + MMCIMASK0);
+}
+
+static void mmci_signal_sdio_irq(struct mmci_host *host, u32 status)
+{
+ if (status & MCI_ST_SDIOIT) {
+ mmci_write_sdio_irq_bit(host, 0);
+ sdio_signal_irq(host->mmc);
+ }
+}
+
/*
* Handle completion of command and data transfers.
*/
@@ -1615,6 +1798,8 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
do {
status = readl(host->base + MMCISTATUS);
+ if (!status)
+ break;
if (host->singleirq) {
if (status & host->mask1_reg)
@@ -1644,6 +1829,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
mmci_data_irq(host, host->data, status);
}
+ if (host->variant->supports_sdio_irq)
+ mmci_signal_sdio_irq(host, status);
+
/*
* Busy detection has been handled by mmci_cmd_irq() above.
* Clear the status bit to prevent polling in IRQ context.
@@ -1729,7 +1917,8 @@ static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
return;
if (host->variant->busy_timeout && mmc->actual_clock)
- max_busy_timeout = ~0UL / (mmc->actual_clock / MSEC_PER_SEC);
+ max_busy_timeout = U32_MAX / DIV_ROUND_UP(mmc->actual_clock,
+ MSEC_PER_SEC);
mmc->max_busy_timeout = max_busy_timeout;
}
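
The new formula computes the clocks-per-millisecond first and rounds it up, so the resulting millisecond limit always converts back into a cycle count that fits the 32-bit busy timer. A hedged worked example with an illustrative 100 MHz actual clock:

/*
 * Hedged worked example, not driver code: with mmc->actual_clock at
 * 100 MHz, DIV_ROUND_UP(100000000, MSEC_PER_SEC) = 100000 cycles/ms,
 * so max_busy_timeout = U32_MAX / 100000 = 42949 ms; converting back,
 * 42949 ms * 100000 cycles/ms ~= 4.29e9 cycles, which still fits a
 * 32-bit counter. The removed "~0UL / (clock / MSEC_PER_SEC)" form
 * evaluates ~0UL as 2^64 - 1 on 64-bit builds, so its quotient may
 * not fit the 32-bit timeout value at all.
 */
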
@@ -1742,10 +1931,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
unsigned long flags;
int ret;
- if (host->plat->ios_handler &&
- host->plat->ios_handler(mmc_dev(mmc), ios))
- dev_err(mmc_dev(mmc), "platform ios_handler failed\n");
-
switch (ios->power_mode) {
case MMC_POWER_OFF:
if (!IS_ERR(mmc->supply.vmmc))
@@ -1883,6 +2068,34 @@ static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
return ret;
}
+static void mmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ if (enable)
+ /* Keep the SDIO mode bit if SDIO irqs are enabled */
+ pm_runtime_get_sync(mmc_dev(mmc));
+
+ spin_lock_irqsave(&host->lock, flags);
+ mmci_write_sdio_irq_bit(host, enable);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+	if (!enable)
+		pm_runtime_put_autosuspend(mmc_dev(mmc));
+}
+
+static void mmci_ack_sdio_irq(struct mmc_host *mmc)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ mmci_write_sdio_irq_bit(host, 1);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
static struct mmc_host_ops mmci_ops = {
.request = mmci_request,
.pre_req = mmci_pre_request,
@@ -1960,28 +2173,28 @@ static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
if (ret)
return ret;
- if (of_get_property(np, "st,sig-dir-dat0", NULL))
+ if (of_property_read_bool(np, "st,sig-dir-dat0"))
host->pwr_reg_add |= MCI_ST_DATA0DIREN;
- if (of_get_property(np, "st,sig-dir-dat2", NULL))
+ if (of_property_read_bool(np, "st,sig-dir-dat2"))
host->pwr_reg_add |= MCI_ST_DATA2DIREN;
- if (of_get_property(np, "st,sig-dir-dat31", NULL))
+ if (of_property_read_bool(np, "st,sig-dir-dat31"))
host->pwr_reg_add |= MCI_ST_DATA31DIREN;
- if (of_get_property(np, "st,sig-dir-dat74", NULL))
+ if (of_property_read_bool(np, "st,sig-dir-dat74"))
host->pwr_reg_add |= MCI_ST_DATA74DIREN;
- if (of_get_property(np, "st,sig-dir-cmd", NULL))
+ if (of_property_read_bool(np, "st,sig-dir-cmd"))
host->pwr_reg_add |= MCI_ST_CMDDIREN;
- if (of_get_property(np, "st,sig-pin-fbclk", NULL))
+ if (of_property_read_bool(np, "st,sig-pin-fbclk"))
host->pwr_reg_add |= MCI_ST_FBCLKEN;
- if (of_get_property(np, "st,sig-dir", NULL))
+ if (of_property_read_bool(np, "st,sig-dir"))
host->pwr_reg_add |= MCI_STM32_DIRPOL;
- if (of_get_property(np, "st,neg-edge", NULL))
+ if (of_property_read_bool(np, "st,neg-edge"))
host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
- if (of_get_property(np, "st,use-ckin", NULL))
+ if (of_property_read_bool(np, "st,use-ckin"))
mmci_probe_level_translator(mmc);
- if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
+ if (of_property_read_bool(np, "mmc-cap-mmc-highspeed"))
mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
- if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
+ if (of_property_read_bool(np, "mmc-cap-sd-highspeed"))
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
return 0;
@@ -2009,7 +2222,7 @@ static int mmci_probe(struct amba_device *dev,
return -ENOMEM;
}
- mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
+ mmc = devm_mmc_alloc_host(&dev->dev, sizeof(*host));
if (!mmc)
return -ENOMEM;
@@ -2020,7 +2233,7 @@ static int mmci_probe(struct amba_device *dev,
ret = mmci_of_parse(np, mmc);
if (ret)
- goto host_free;
+ return ret;
/*
* Some variant (STM32) doesn't have opendrain bit, nevertheless
@@ -2028,19 +2241,15 @@ static int mmci_probe(struct amba_device *dev,
*/
if (!variant->opendrain) {
host->pinctrl = devm_pinctrl_get(&dev->dev);
- if (IS_ERR(host->pinctrl)) {
- dev_err(&dev->dev, "failed to get pinctrl");
- ret = PTR_ERR(host->pinctrl);
- goto host_free;
- }
+ if (IS_ERR(host->pinctrl))
+ return dev_err_probe(&dev->dev, PTR_ERR(host->pinctrl),
+ "failed to get pinctrl\n");
host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
MMCI_PINCTRL_STATE_OPENDRAIN);
- if (IS_ERR(host->pins_opendrain)) {
- dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
- ret = PTR_ERR(host->pins_opendrain);
- goto host_free;
- }
+ if (IS_ERR(host->pins_opendrain))
+ return dev_err_probe(&dev->dev, PTR_ERR(host->pins_opendrain),
+ "Can't select opendrain pins\n");
}
host->hw_designer = amba_manf(dev);
@@ -2049,14 +2258,12 @@ static int mmci_probe(struct amba_device *dev,
dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);
host->clk = devm_clk_get(&dev->dev, NULL);
- if (IS_ERR(host->clk)) {
- ret = PTR_ERR(host->clk);
- goto host_free;
- }
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
ret = clk_prepare_enable(host->clk);
if (ret)
- goto host_free;
+ return ret;
if (variant->qcom_fifo)
host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
@@ -2126,6 +2333,9 @@ static int mmci_probe(struct amba_device *dev,
ret = PTR_ERR(host->rst);
goto clk_disable;
}
+ ret = reset_control_deassert(host->rst);
+ if (ret)
+ dev_err(mmc_dev(mmc), "failed to de-assert reset\n");
/* Get regulators and the supported OCR mask */
ret = mmc_regulator_get_supply(mmc);
@@ -2155,6 +2365,16 @@ static int mmci_probe(struct amba_device *dev,
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
}
+ if (variant->supports_sdio_irq && host->mmc->caps & MMC_CAP_SDIO_IRQ) {
+ mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+
+ mmci_ops.enable_sdio_irq = mmci_enable_sdio_irq;
+ mmci_ops.ack_sdio_irq = mmci_ack_sdio_irq;
+
+ mmci_write_datactrlreg(host,
+ host->variant->datactrl_mask_sdio);
+ }
+
/* Variants with mandatory busy timeout in HW needs R1B responses. */
if (variant->busy_timeout)
mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
@@ -2237,6 +2457,10 @@ static int mmci_probe(struct amba_device *dev,
goto clk_disable;
}
+ if (host->variant->busy_detect)
+ INIT_DELAYED_WORK(&host->ux500_busy_timeout_work,
+ ux500_busy_timeout_work);
+
writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);
amba_set_drvdata(dev, mmc);
@@ -2251,15 +2475,15 @@ static int mmci_probe(struct amba_device *dev,
pm_runtime_set_autosuspend_delay(&dev->dev, 50);
pm_runtime_use_autosuspend(&dev->dev);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto clk_disable;
pm_runtime_put(&dev->dev);
return 0;
clk_disable:
clk_disable_unprepare(host->clk);
- host_free:
- mmc_free_host(mmc);
return ret;
}
@@ -2289,11 +2513,9 @@ static void mmci_remove(struct amba_device *dev)
mmci_dma_release(host);
clk_disable_unprepare(host->clk);
- mmc_free_host(mmc);
}
}
-#ifdef CONFIG_PM
static void mmci_save(struct mmci_host *host)
{
unsigned long flags;
@@ -2358,12 +2580,10 @@ static int mmci_runtime_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops mmci_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};
static const struct amba_id mmci_ids[] = {
@@ -2428,6 +2648,16 @@ static const struct amba_id mmci_ids[] = {
.mask = 0xf0ffffff,
.data = &variant_stm32_sdmmcv2,
},
+ {
+ .id = 0x20253180,
+ .mask = 0xf0ffffff,
+ .data = &variant_stm32_sdmmcv2,
+ },
+ {
+ .id = 0x00353180,
+ .mask = 0xf0ffffff,
+ .data = &variant_stm32_sdmmcv3,
+ },
/* Qualcomm variants */
{
.id = 0x00051180,
@@ -2442,7 +2672,8 @@ MODULE_DEVICE_TABLE(amba, mmci_ids);
static struct amba_driver mmci_driver = {
.drv = {
.name = DRIVER_NAME,
- .pm = &mmci_dev_pm_ops,
+ .pm = pm_ptr(&mmci_dev_pm_ops),
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = mmci_probe,
.remove = mmci_remove,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index e1a9b96a3396..4d3647f9ec06 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -77,7 +77,7 @@
#define MCI_CPSM_INTERRUPT BIT(8)
#define MCI_CPSM_PENDING BIT(9)
#define MCI_CPSM_ENABLE BIT(10)
-/* Command register flag extenstions in the ST Micro versions */
+/* Command register flag extensions in the ST Micro versions */
#define MCI_CPSM_ST_SDIO_SUSP BIT(11)
#define MCI_CPSM_ST_ENCMD_COMPL BIT(12)
#define MCI_CPSM_ST_NIEN BIT(13)
@@ -218,6 +218,11 @@
#define MCI_STM32_BUSYD0ENDMASK BIT(21)
#define MMCIMASK1 0x040
+
+/* STM32 sdmmc data FIFO threshold register */
+#define MMCI_STM32_FIFOTHRR 0x044
+#define MMCI_STM32_THR_MASK GENMASK(3, 0)
+
#define MMCIFIFOCNT 0x048
#define MMCIFIFO 0x080 /* to 0x0bc */
@@ -227,8 +232,6 @@
#define MMCI_STM32_IDMALLIEN BIT(1)
#define MMCI_STM32_IDMABSIZER 0x054
-#define MMCI_STM32_IDMABNDT_SHIFT 5
-#define MMCI_STM32_IDMABNDT_MASK GENMASK(12, 5)
#define MMCI_STM32_IDMABASE0R 0x058
@@ -262,6 +265,19 @@ struct dma_chan;
struct mmci_host;
/**
+ * enum mmci_busy_state - enumerate the busy detect wait states
+ *
+ * This is used for the state machine waiting for different busy detect
+ * interrupts on hardware that fire a single IRQ for start and end of
+ * the busy detect phase on DAT0.
+ */
+enum mmci_busy_state {
+ MMCI_BUSY_WAITING_FOR_START_IRQ,
+ MMCI_BUSY_WAITING_FOR_END_IRQ,
+ MMCI_BUSY_DONE,
+};
+
+/**
* struct variant_data - MMCI variant-specific quirks
* @clkreg: default value for MCICLOCK register
* @clkreg_enable: enable value for MMCICLOCK register
@@ -315,7 +331,9 @@ struct mmci_host;
* register.
* @opendrain: bitmask identifying the OPENDRAIN bit inside MMCIPOWER register
* @dma_lli: true if variant has dma link list feature.
+ * @supports_sdio_irq: allow SD I/O card to interrupt the host
* @stm32_idmabsize_mask: stm32 sdmmc idma buffer size.
+ * @dma_flow_controller: use peripheral as flow controller for DMA.
*/
struct variant_data {
unsigned int clkreg;
@@ -360,7 +378,10 @@ struct variant_data {
u32 start_err;
u32 opendrain;
u8 dma_lli:1;
+ bool supports_sdio_irq;
u32 stm32_idmabsize_mask;
+ u32 stm32_idmabsize_align;
+ bool dma_flow_controller;
void (*init)(struct mmci_host *host);
};
@@ -380,7 +401,7 @@ struct mmci_host_ops {
void (*dma_error)(struct mmci_host *host);
void (*set_clkreg)(struct mmci_host *host, unsigned int desired);
void (*set_pwrreg)(struct mmci_host *host, unsigned int pwr);
- bool (*busy_complete)(struct mmci_host *host, u32 status, u32 err_msk);
+ bool (*busy_complete)(struct mmci_host *host, struct mmc_command *cmd, u32 status, u32 err_msk);
void (*pre_sig_volt_switch)(struct mmci_host *host);
int (*post_sig_volt_switch)(struct mmci_host *host, struct mmc_ios *ios);
};
@@ -409,6 +430,7 @@ struct mmci_host {
u32 clk_reg;
u32 clk_reg_add;
u32 datactrl_reg;
+ enum mmci_busy_state busy_state;
u32 busy_status;
u32 mask1_reg;
u8 vqmmc_enabled:1;
@@ -437,6 +459,7 @@ struct mmci_host {
void *dma_priv;
s32 next_cookie;
+ struct delayed_work ux500_busy_timeout_work;
};
#define dma_inprogress(host) ((host)->dma_in_progress)
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index 51db30acf4dc..9dc51859c2e5 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -15,7 +15,6 @@
#include "mmci.h"
#define SDMMC_LLI_BUF_LEN PAGE_SIZE
-#define SDMMC_IDMA_BURST BIT(MMCI_STM32_IDMABNDT_SHIFT)
#define DLYB_CR 0x0
#define DLYB_CR_DEN BIT(0)
@@ -34,6 +33,20 @@
#define DLYB_LNG_TIMEOUT_US 1000
#define SDMMC_VSWEND_TIMEOUT_US 10000
+#define SYSCFG_DLYBSD_CR 0x0
+#define DLYBSD_CR_EN BIT(0)
+#define DLYBSD_CR_RXTAPSEL_MASK GENMASK(6, 1)
+#define DLYBSD_TAPSEL_NB 32
+#define DLYBSD_BYP_EN BIT(16)
+#define DLYBSD_BYP_CMD GENMASK(21, 17)
+#define DLYBSD_ANTIGLITCH_EN BIT(22)
+
+#define SYSCFG_DLYBSD_SR 0x4
+#define DLYBSD_SR_LOCK BIT(0)
+#define DLYBSD_SR_RXTAPSEL_ACK BIT(1)
+
+#define DLYBSD_TIMEOUT_1S_IN_US 1000000
+
struct sdmmc_lli_desc {
u32 idmalar;
u32 idmabase;
@@ -43,17 +56,33 @@ struct sdmmc_lli_desc {
struct sdmmc_idma {
dma_addr_t sg_dma;
void *sg_cpu;
+ dma_addr_t bounce_dma_addr;
+ void *bounce_buf;
+ bool use_bounce_buffer;
+};
+
+struct sdmmc_dlyb;
+
+struct sdmmc_tuning_ops {
+ int (*dlyb_enable)(struct sdmmc_dlyb *dlyb);
+ void (*set_input_ck)(struct sdmmc_dlyb *dlyb);
+ int (*tuning_prepare)(struct mmci_host *host);
+ int (*set_cfg)(struct sdmmc_dlyb *dlyb, int unit __maybe_unused,
+ int phase, bool sampler __maybe_unused);
};
struct sdmmc_dlyb {
void __iomem *base;
u32 unit;
u32 max;
+ struct sdmmc_tuning_ops *ops;
};
static int sdmmc_idma_validate_data(struct mmci_host *host,
struct mmc_data *data)
{
+ struct sdmmc_idma *idma = host->dma_priv;
+ struct device *dev = mmc_dev(host->mmc);
struct scatterlist *sg;
int i;
@@ -61,41 +90,70 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
* idma has constraints on idmabase & idmasize for each element
* excepted the last element which has no constraint on idmasize
*/
+ idma->use_bounce_buffer = false;
for_each_sg(data->sg, sg, data->sg_len - 1, i) {
- if (!IS_ALIGNED(data->sg->offset, sizeof(u32)) ||
- !IS_ALIGNED(data->sg->length, SDMMC_IDMA_BURST)) {
- dev_err(mmc_dev(host->mmc),
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
+ !IS_ALIGNED(sg->length,
+ host->variant->stm32_idmabsize_align)) {
+ dev_dbg(mmc_dev(host->mmc),
"unaligned scatterlist: ofst:%x length:%d\n",
data->sg->offset, data->sg->length);
- return -EINVAL;
+ goto use_bounce_buffer;
}
}
- if (!IS_ALIGNED(data->sg->offset, sizeof(u32))) {
- dev_err(mmc_dev(host->mmc),
+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+ dev_dbg(mmc_dev(host->mmc),
"unaligned last scatterlist: ofst:%x length:%d\n",
data->sg->offset, data->sg->length);
- return -EINVAL;
+ goto use_bounce_buffer;
}
return 0;
+
+use_bounce_buffer:
+ if (!idma->bounce_buf) {
+ idma->bounce_buf = dmam_alloc_coherent(dev,
+ host->mmc->max_req_size,
+ &idma->bounce_dma_addr,
+ GFP_KERNEL);
+ if (!idma->bounce_buf) {
+ dev_err(dev, "Unable to map allocate DMA bounce buffer.\n");
+ return -ENOMEM;
+ }
+ }
+
+ idma->use_bounce_buffer = true;
+
+ return 0;
}
static int _sdmmc_idma_prep_data(struct mmci_host *host,
struct mmc_data *data)
{
- int n_elem;
+ struct sdmmc_idma *idma = host->dma_priv;
- n_elem = dma_map_sg(mmc_dev(host->mmc),
- data->sg,
- data->sg_len,
- mmc_get_dma_dir(data));
+ if (idma->use_bounce_buffer) {
+ if (data->flags & MMC_DATA_WRITE) {
+ unsigned int xfer_bytes = data->blksz * data->blocks;
- if (!n_elem) {
- dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
- return -EINVAL;
- }
+ sg_copy_to_buffer(data->sg, data->sg_len,
+ idma->bounce_buf, xfer_bytes);
+ dma_wmb();
+ }
+ } else {
+ int n_elem;
+
+ n_elem = dma_map_sg(mmc_dev(host->mmc),
+ data->sg,
+ data->sg_len,
+ mmc_get_dma_dir(data));
+ if (!n_elem) {
+ dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
+ return -EINVAL;
+ }
+ }
return 0;
}
@@ -112,8 +170,19 @@ static int sdmmc_idma_prep_data(struct mmci_host *host,
static void sdmmc_idma_unprep_data(struct mmci_host *host,
struct mmc_data *data, int err)
{
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ struct sdmmc_idma *idma = host->dma_priv;
+
+ if (idma->use_bounce_buffer) {
+ if (data->flags & MMC_DATA_READ) {
+ unsigned int xfer_bytes = data->blksz * data->blocks;
+
+ sg_copy_from_buffer(data->sg, data->sg_len,
+ idma->bounce_buf, xfer_bytes);
+ }
+ } else {
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ }
}
static int sdmmc_idma_setup(struct mmci_host *host)
@@ -137,12 +206,15 @@ static int sdmmc_idma_setup(struct mmci_host *host)
host->mmc->max_segs = SDMMC_LLI_BUF_LEN /
sizeof(struct sdmmc_lli_desc);
host->mmc->max_seg_size = host->variant->stm32_idmabsize_mask;
+
+ host->mmc->max_req_size = SZ_1M;
} else {
host->mmc->max_segs = 1;
host->mmc->max_seg_size = host->mmc->max_req_size;
}
- return dma_set_max_seg_size(dev, host->mmc->max_seg_size);
+ dma_set_max_seg_size(dev, host->mmc->max_seg_size);
+ return 0;
}
static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
@@ -154,8 +226,18 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
struct scatterlist *sg;
int i;
- if (!host->variant->dma_lli || data->sg_len == 1) {
- writel_relaxed(sg_dma_address(data->sg),
+ host->dma_in_progress = true;
+
+ if (!host->variant->dma_lli || data->sg_len == 1 ||
+ idma->use_bounce_buffer) {
+ u32 dma_addr;
+
+ if (idma->use_bounce_buffer)
+ dma_addr = idma->bounce_dma_addr;
+ else
+ dma_addr = sg_dma_address(data->sg);
+
+ writel_relaxed(dma_addr,
host->base + MMCI_STM32_IDMABASE0R);
writel_relaxed(MMCI_STM32_IDMAEN,
host->base + MMCI_STM32_IDMACTRLR);
@@ -184,9 +266,30 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
return 0;
}
+static void sdmmc_idma_error(struct mmci_host *host)
+{
+ struct mmc_data *data = host->data;
+ struct sdmmc_idma *idma = host->dma_priv;
+
+ if (!dma_inprogress(host))
+ return;
+
+ writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+ host->dma_in_progress = false;
+ data->host_cookie = 0;
+
+ if (!idma->use_bounce_buffer)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+}
+
static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
+ if (!dma_inprogress(host))
+ return;
+
writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+ host->dma_in_progress = false;
if (!data->host_cookie)
sdmmc_idma_unprep_data(host, data, 0);
@@ -239,22 +342,13 @@ static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired)
clk |= host->clk_reg_add;
clk |= ddr;
- /*
- * SDMMC_FBCK is selected when an external Delay Block is needed
- * with SDR104.
- */
- if (host->mmc->ios.timing >= MMC_TIMING_UHS_SDR50) {
+ if (host->mmc->ios.timing >= MMC_TIMING_UHS_SDR50)
clk |= MCI_STM32_CLK_BUSSPEED;
- if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) {
- clk &= ~MCI_STM32_CLK_SEL_MSK;
- clk |= MCI_STM32_CLK_SELFBCK;
- }
- }
mmci_write_clkreg(host, clk);
}
-static void sdmmc_dlyb_input_ck(struct sdmmc_dlyb *dlyb)
+static void sdmmc_dlyb_mp15_input_ck(struct sdmmc_dlyb *dlyb)
{
if (!dlyb || !dlyb->base)
return;
@@ -271,7 +365,8 @@ static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
/* adds OF options */
pwr = host->pwr_reg_add;
- sdmmc_dlyb_input_ck(dlyb);
+ if (dlyb && dlyb->ops->set_input_ck)
+ dlyb->ops->set_input_ck(dlyb);
if (ios.power_mode == MMC_POWER_OFF) {
/* Only a reset could power-off sdmmc */
@@ -316,6 +411,19 @@ static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
datactrl = mmci_dctrl_blksz(host);
+ if (host->hw_revision >= 3) {
+ u32 thr = 0;
+
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104 ||
+ host->mmc->ios.timing == MMC_TIMING_MMC_HS200) {
+ thr = ffs(min_t(unsigned int, host->data->blksz,
+ host->variant->fifosize));
+ thr = min_t(u32, thr, MMCI_STM32_THR_MASK);
+ }
+
+ writel_relaxed(thr, host->base + MMCI_STM32_FIFOTHRR);
+ }
+
if (host->mmc->card && mmc_card_sdio(host->mmc->card) &&
host->data->blocks == 1)
datactrl |= MCI_DPSM_STM32_MODE_SDIO;
@@ -327,7 +435,8 @@ static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
return datactrl;
}
-static bool sdmmc_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
+static bool sdmmc_busy_complete(struct mmci_host *host, struct mmc_command *cmd,
+ u32 status, u32 err_msk)
{
void __iomem *base = host->base;
u32 busy_d0, busy_d0end, mask, sdmmc_status;
@@ -368,8 +477,15 @@ complete:
return true;
}
-static void sdmmc_dlyb_set_cfgr(struct sdmmc_dlyb *dlyb,
- int unit, int phase, bool sampler)
+static int sdmmc_dlyb_mp15_enable(struct sdmmc_dlyb *dlyb)
+{
+ writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);
+
+ return 0;
+}
+
+static int sdmmc_dlyb_mp15_set_cfg(struct sdmmc_dlyb *dlyb,
+ int unit, int phase, bool sampler)
{
u32 cfgr;
@@ -381,16 +497,18 @@ static void sdmmc_dlyb_set_cfgr(struct sdmmc_dlyb *dlyb,
if (!sampler)
writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);
+
+ return 0;
}
-static int sdmmc_dlyb_lng_tuning(struct mmci_host *host)
+static int sdmmc_dlyb_mp15_prepare(struct mmci_host *host)
{
struct sdmmc_dlyb *dlyb = host->variant_priv;
u32 cfgr;
int i, lng, ret;
for (i = 0; i <= DLYB_CFGR_UNIT_MAX; i++) {
- sdmmc_dlyb_set_cfgr(dlyb, i, DLYB_CFGR_SEL_MAX, true);
+ dlyb->ops->set_cfg(dlyb, i, DLYB_CFGR_SEL_MAX, true);
ret = readl_relaxed_poll_timeout(dlyb->base + DLYB_CFGR, cfgr,
(cfgr & DLYB_CFGR_LNGF),
@@ -416,14 +534,58 @@ static int sdmmc_dlyb_lng_tuning(struct mmci_host *host)
return 0;
}
+static int sdmmc_dlyb_mp25_enable(struct sdmmc_dlyb *dlyb)
+{
+ u32 cr, sr;
+
+ cr = readl_relaxed(dlyb->base + SYSCFG_DLYBSD_CR);
+ cr |= DLYBSD_CR_EN;
+
+ writel_relaxed(cr, dlyb->base + SYSCFG_DLYBSD_CR);
+
+ return readl_relaxed_poll_timeout(dlyb->base + SYSCFG_DLYBSD_SR,
+ sr, sr & DLYBSD_SR_LOCK, 1,
+ DLYBSD_TIMEOUT_1S_IN_US);
+}
+
+static int sdmmc_dlyb_mp25_set_cfg(struct sdmmc_dlyb *dlyb,
+ int unit __maybe_unused, int phase,
+ bool sampler __maybe_unused)
+{
+ u32 cr, sr;
+
+ cr = readl_relaxed(dlyb->base + SYSCFG_DLYBSD_CR);
+ cr &= ~DLYBSD_CR_RXTAPSEL_MASK;
+ cr |= FIELD_PREP(DLYBSD_CR_RXTAPSEL_MASK, phase);
+
+ writel_relaxed(cr, dlyb->base + SYSCFG_DLYBSD_CR);
+
+ return readl_relaxed_poll_timeout(dlyb->base + SYSCFG_DLYBSD_SR,
+ sr, sr & DLYBSD_SR_RXTAPSEL_ACK, 1,
+ DLYBSD_TIMEOUT_1S_IN_US);
+}
+
+static int sdmmc_dlyb_mp25_prepare(struct mmci_host *host)
+{
+ struct sdmmc_dlyb *dlyb = host->variant_priv;
+
+ dlyb->max = DLYBSD_TAPSEL_NB;
+
+ return 0;
+}
+
static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode)
{
struct sdmmc_dlyb *dlyb = host->variant_priv;
int cur_len = 0, max_len = 0, end_of_len = 0;
- int phase;
+ int phase, ret;
for (phase = 0; phase <= dlyb->max; phase++) {
- sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false);
+ ret = dlyb->ops->set_cfg(dlyb, dlyb->unit, phase, false);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc), "tuning config failed\n");
+ return ret;
+ }
if (mmc_send_tuning(host->mmc, opcode, NULL)) {
cur_len = 0;
@@ -441,8 +603,15 @@ static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode)
return -EINVAL;
}
+ if (dlyb->ops->set_input_ck)
+ dlyb->ops->set_input_ck(dlyb);
+
phase = end_of_len - max_len / 2;
- sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false);
+ ret = dlyb->ops->set_cfg(dlyb, dlyb->unit, phase, false);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc), "tuning reconfig failed\n");
+ return ret;
+ }
dev_dbg(mmc_dev(host->mmc), "unit:%d max_dly:%d phase:%d\n",
dlyb->unit, dlyb->max, phase);
@@ -454,12 +623,33 @@ static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct mmci_host *host = mmc_priv(mmc);
struct sdmmc_dlyb *dlyb = host->variant_priv;
+ u32 clk;
+ int ret;
+
+ if ((host->mmc->ios.timing != MMC_TIMING_UHS_SDR104 &&
+ host->mmc->ios.timing != MMC_TIMING_MMC_HS200) ||
+ host->mmc->actual_clock <= 50000000)
+ return 0;
if (!dlyb || !dlyb->base)
return -EINVAL;
- if (sdmmc_dlyb_lng_tuning(host))
- return -EINVAL;
+ ret = dlyb->ops->dlyb_enable(dlyb);
+ if (ret)
+ return ret;
+
+ /*
+ * SDMMC_FBCK is selected when an external Delay Block is needed
+ * with SDR104 or HS200.
+ */
+ clk = host->clk_reg;
+ clk &= ~MCI_STM32_CLK_SEL_MSK;
+ clk |= MCI_STM32_CLK_SELFBCK;
+ mmci_write_clkreg(host, clk);
+
+ ret = dlyb->ops->tuning_prepare(host);
+ if (ret)
+ return ret;
return sdmmc_dlyb_phase_tuning(host, opcode);
}
@@ -479,8 +669,9 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
u32 status;
int ret = 0;
- if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
- spin_lock_irqsave(&host->lock, flags);
+ spin_lock_irqsave(&host->lock, flags);
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 &&
+ host->pwr_reg & MCI_STM32_VSWITCHEN) {
mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
spin_unlock_irqrestore(&host->lock, flags);
@@ -492,9 +683,11 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
host->base + MMCICLEAR);
+ spin_lock_irqsave(&host->lock, flags);
mmci_write_pwrreg(host, host->pwr_reg &
~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
}
+ spin_unlock_irqrestore(&host->lock, flags);
return ret;
}
@@ -507,6 +700,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
.dma_setup = sdmmc_idma_setup,
.dma_start = sdmmc_idma_start,
.dma_finalize = sdmmc_idma_finalize,
+ .dma_error = sdmmc_idma_error,
.set_clkreg = mmci_sdmmc_set_clkreg,
.set_pwrreg = mmci_sdmmc_set_pwrreg,
.busy_complete = sdmmc_busy_complete,
@@ -514,6 +708,19 @@ static struct mmci_host_ops sdmmc_variant_ops = {
.post_sig_volt_switch = sdmmc_post_sig_volt_switch,
};
+static struct sdmmc_tuning_ops dlyb_tuning_mp15_ops = {
+ .dlyb_enable = sdmmc_dlyb_mp15_enable,
+ .set_input_ck = sdmmc_dlyb_mp15_input_ck,
+ .tuning_prepare = sdmmc_dlyb_mp15_prepare,
+ .set_cfg = sdmmc_dlyb_mp15_set_cfg,
+};
+
+static struct sdmmc_tuning_ops dlyb_tuning_mp25_ops = {
+ .dlyb_enable = sdmmc_dlyb_mp25_enable,
+ .tuning_prepare = sdmmc_dlyb_mp25_prepare,
+ .set_cfg = sdmmc_dlyb_mp25_set_cfg,
+};
+
void sdmmc_variant_init(struct mmci_host *host)
{
struct device_node *np = host->mmc->parent->of_node;
@@ -532,6 +739,11 @@ void sdmmc_variant_init(struct mmci_host *host)
return;
dlyb->base = base_dlyb;
+ if (of_device_is_compatible(np, "st,stm32mp25-sdmmc2"))
+ dlyb->ops = &dlyb_tuning_mp25_ops;
+ else
+ dlyb->ops = &dlyb_tuning_mp15_ops;
+
host->variant_priv = dlyb;
host->mmc_ops->execute_tuning = sdmmc_execute_tuning;
}
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index bde298887579..3dd8f232052f 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -111,8 +111,8 @@
#define CLK_DIV_MASK 0x7f
/* REG_BUS_WIDTH */
-#define BUS_WIDTH_8 BIT(2)
-#define BUS_WIDTH_4 BIT(1)
+#define BUS_WIDTH_4_SUPPORT BIT(3)
+#define BUS_WIDTH_4 BIT(2)
#define BUS_WIDTH_1 BIT(0)
#define MMC_VDD_360 23
@@ -254,6 +254,11 @@ static void moxart_dma_complete(void *param)
complete(&host->dma_complete);
}
+static bool moxart_use_dma(struct moxart_host *host)
+{
+ return (host->data_len > host->fifo_width) && host->have_dma;
+}
+
static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
{
u32 len, dir_slave;
@@ -291,11 +296,11 @@ static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
dma_async_issue_pending(dma_chan);
}
- data->bytes_xfered += host->data_remain;
-
wait_for_completion_interruptible_timeout(&host->dma_complete,
host->timeout);
+ data->bytes_xfered = host->data_len;
+
dma_unmap_sg(dma_chan->device->dev,
data->sg, data->sg_len,
mmc_get_dma_dir(data));
@@ -338,13 +343,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
return;
}
for (len = 0; len < remain && len < host->fifo_width;) {
- /* SCR data must be read in big endian. */
- if (data->mrq->cmd->opcode == SD_APP_SEND_SCR)
- *sgp = ioread32be(host->base +
- REG_DATA_WINDOW);
- else
- *sgp = ioread32(host->base +
- REG_DATA_WINDOW);
+ *sgp = ioread32(host->base + REG_DATA_WINDOW);
sgp++;
len += 4;
}
@@ -381,7 +380,7 @@ static void moxart_prepare_data(struct moxart_host *host)
if (data->flags & MMC_DATA_WRITE)
datactrl |= DCR_DATA_WRITE;
- if ((host->data_len > host->fifo_width) && host->have_dma)
+ if (moxart_use_dma(host))
datactrl |= DCR_DMA_EN;
writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
@@ -413,7 +412,7 @@ static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
moxart_send_command(host, host->mrq->cmd);
if (mrq->cmd->data) {
- if ((host->data_len > host->fifo_width) && host->have_dma) {
+ if (moxart_use_dma(host)) {
writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
@@ -524,9 +523,6 @@ static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_BUS_WIDTH_4:
writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
break;
- case MMC_BUS_WIDTH_8:
- writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH);
- break;
default:
writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
break;
@@ -562,41 +558,33 @@ static int moxart_probe(struct platform_device *pdev)
int irq, ret;
u32 i;
- mmc = mmc_alloc_host(sizeof(struct moxart_host), dev);
+ mmc = devm_mmc_alloc_host(dev, sizeof(*host));
if (!mmc) {
- dev_err(dev, "mmc_alloc_host failed\n");
- ret = -ENOMEM;
- goto out;
+ dev_err(dev, "devm_mmc_alloc_host failed\n");
+ return -ENOMEM;
}
ret = of_address_to_resource(node, 0, &res_mmc);
- if (ret) {
- dev_err(dev, "of_address_to_resource failed\n");
- goto out;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "of_address_to_resource failed\n");
irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0) {
- dev_err(dev, "irq_of_parse_and_map failed\n");
- ret = -EINVAL;
- goto out;
- }
+ if (irq <= 0)
+ return dev_err_probe(dev, -EINVAL,
+ "irq_of_parse_and_map failed\n");
clk = devm_clk_get(dev, NULL);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto out;
- }
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
reg_mmc = devm_ioremap_resource(dev, &res_mmc);
- if (IS_ERR(reg_mmc)) {
- ret = PTR_ERR(reg_mmc);
- goto out;
- }
+ if (IS_ERR(reg_mmc))
+ return PTR_ERR(reg_mmc);
ret = mmc_of_parse(mmc);
if (ret)
- goto out;
+ return ret;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -614,6 +602,9 @@ static int moxart_probe(struct platform_device *pdev)
mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2);
mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2);
mmc->ocr_avail = 0xffff00; /* Support 2.0v - 3.6v power. */
+ mmc->max_blk_size = 2048; /* Max. block length in REG_DATA_CONTROL */
+ mmc->max_req_size = DATA_LEN_MASK; /* bits 0-23 in REG_DATA_LENGTH */
+ mmc->max_blk_count = mmc->max_req_size / 512;
if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) {
if (PTR_ERR(host->dma_chan_tx) == -EPROBE_DEFER ||
@@ -621,13 +612,24 @@ static int moxart_probe(struct platform_device *pdev)
ret = -EPROBE_DEFER;
goto out;
}
+ if (!IS_ERR(host->dma_chan_tx)) {
+ dma_release_channel(host->dma_chan_tx);
+ host->dma_chan_tx = NULL;
+ }
+ if (!IS_ERR(host->dma_chan_rx)) {
+ dma_release_channel(host->dma_chan_rx);
+ host->dma_chan_rx = NULL;
+ }
dev_dbg(dev, "PIO mode transfer enabled\n");
host->have_dma = false;
+
+ mmc->max_seg_size = mmc->max_req_size;
} else {
dev_dbg(dev, "DMA channels found (%p,%p)\n",
host->dma_chan_tx, host->dma_chan_rx);
host->have_dma = true;
+ memset(&cfg, 0, sizeof(cfg));
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -640,18 +642,14 @@ static int moxart_probe(struct platform_device *pdev)
cfg.src_addr = host->reg_phys + REG_DATA_WINDOW;
cfg.dst_addr = 0;
dmaengine_slave_config(host->dma_chan_rx, &cfg);
+
+ mmc->max_seg_size = min3(mmc->max_req_size,
+ dma_get_max_seg_size(host->dma_chan_rx->device->dev),
+ dma_get_max_seg_size(host->dma_chan_tx->device->dev));
}
- switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) {
- case 1:
+ if (readl(host->base + REG_BUS_WIDTH) & BUS_WIDTH_4_SUPPORT)
mmc->caps |= MMC_CAP_4_BIT_DATA;
- break;
- case 2:
- mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
- break;
- default:
- break;
- }
writel(0, host->base + REG_INTERRUPT_MASK);
@@ -667,38 +665,37 @@ static int moxart_probe(struct platform_device *pdev)
goto out;
dev_set_drvdata(dev, mmc);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto out;
dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width);
return 0;
out:
- if (mmc)
- mmc_free_host(mmc);
+ if (!IS_ERR_OR_NULL(host->dma_chan_tx))
+ dma_release_channel(host->dma_chan_tx);
+ if (!IS_ERR_OR_NULL(host->dma_chan_rx))
+ dma_release_channel(host->dma_chan_rx);
return ret;
}
-static int moxart_remove(struct platform_device *pdev)
+static void moxart_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = dev_get_drvdata(&pdev->dev);
struct moxart_host *host = mmc_priv(mmc);
- dev_set_drvdata(&pdev->dev, NULL);
-
- if (!IS_ERR(host->dma_chan_tx))
+ if (!IS_ERR_OR_NULL(host->dma_chan_tx))
dma_release_channel(host->dma_chan_tx);
- if (!IS_ERR(host->dma_chan_rx))
+ if (!IS_ERR_OR_NULL(host->dma_chan_rx))
dma_release_channel(host->dma_chan_rx);
mmc_remove_host(mmc);
- mmc_free_host(mmc);
writel(0, host->base + REG_INTERRUPT_MASK);
writel(0, host->base + REG_POWER_CONTROL);
writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
host->base + REG_CLOCK_CONTROL);
-
- return 0;
}
static const struct of_device_id moxart_mmc_match[] = {
@@ -710,7 +707,7 @@ MODULE_DEVICE_TABLE(of, moxart_mmc_match);
static struct platform_driver moxart_mmc_driver = {
.probe = moxart_probe,
- .remove = moxart_remove,
+ .remove = moxart_remove,
.driver = {
.name = "mmc-moxart",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 4dfc246c5f95..daed659f63f6 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -1,23 +1,24 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2014-2015 MediaTek Inc.
+ * Copyright (c) 2014-2015, 2022 MediaTek Inc.
* Author: Chaotian.Jing <chaotian.jing@mediatek.com>
*/
#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -33,6 +34,7 @@
#include <linux/mmc/slot-gpio.h>
#include "cqhci.h"
+#include "mmc_hsq.h"
#define MAX_BD_NUM 1024
#define MSDC_NR_CLOCKS 3
@@ -65,6 +67,7 @@
#define SDC_RESP3 0x4c
#define SDC_BLK_NUM 0x50
#define SDC_ADV_CFG0 0x64
+#define MSDC_NEW_RX_CFG 0x68
#define EMMC_IOCON 0x7c
#define SDC_ACMD_RESP 0x80
#define DMA_SA_H4BIT 0x8c
@@ -81,6 +84,7 @@
#define EMMC51_CFG0 0x204
#define EMMC50_CFG0 0x208
#define EMMC50_CFG1 0x20c
+#define EMMC50_CFG2 0x21c
#define EMMC50_CFG3 0x220
#define SDC_FIFO_CFG 0x228
#define CQHCI_SETTING 0x7fc
@@ -91,226 +95,269 @@
#define EMMC_TOP_CONTROL 0x00
#define EMMC_TOP_CMD 0x04
#define EMMC50_PAD_DS_TUNE 0x0c
+#define LOOP_TEST_CONTROL 0x30
/*--------------------------------------------------------------------------*/
/* Register Mask */
/*--------------------------------------------------------------------------*/
/* MSDC_CFG mask */
-#define MSDC_CFG_MODE (0x1 << 0) /* RW */
-#define MSDC_CFG_CKPDN (0x1 << 1) /* RW */
-#define MSDC_CFG_RST (0x1 << 2) /* RW */
-#define MSDC_CFG_PIO (0x1 << 3) /* RW */
-#define MSDC_CFG_CKDRVEN (0x1 << 4) /* RW */
-#define MSDC_CFG_BV18SDT (0x1 << 5) /* RW */
-#define MSDC_CFG_BV18PSS (0x1 << 6) /* R */
-#define MSDC_CFG_CKSTB (0x1 << 7) /* R */
-#define MSDC_CFG_CKDIV (0xff << 8) /* RW */
-#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */
-#define MSDC_CFG_HS400_CK_MODE (0x1 << 18) /* RW */
-#define MSDC_CFG_HS400_CK_MODE_EXTRA (0x1 << 22) /* RW */
-#define MSDC_CFG_CKDIV_EXTRA (0xfff << 8) /* RW */
-#define MSDC_CFG_CKMOD_EXTRA (0x3 << 20) /* RW */
+#define MSDC_CFG_MODE BIT(0) /* RW */
+#define MSDC_CFG_CKPDN BIT(1) /* RW */
+#define MSDC_CFG_RST BIT(2) /* RW */
+#define MSDC_CFG_PIO BIT(3) /* RW */
+#define MSDC_CFG_CKDRVEN BIT(4) /* RW */
+#define MSDC_CFG_BV18SDT BIT(5) /* RW */
+#define MSDC_CFG_BV18PSS BIT(6) /* R */
+#define MSDC_CFG_CKSTB BIT(7) /* R */
+#define MSDC_CFG_CKDIV GENMASK(15, 8) /* RW */
+#define MSDC_CFG_CKMOD GENMASK(17, 16) /* RW */
+#define MSDC_CFG_HS400_CK_MODE BIT(18) /* RW */
+#define MSDC_CFG_HS400_CK_MODE_EXTRA BIT(22) /* RW */
+#define MSDC_CFG_CKDIV_EXTRA GENMASK(19, 8) /* RW */
+#define MSDC_CFG_CKMOD_EXTRA GENMASK(21, 20) /* RW */
/* MSDC_IOCON mask */
-#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */
-#define MSDC_IOCON_RSPL (0x1 << 1) /* RW */
-#define MSDC_IOCON_DSPL (0x1 << 2) /* RW */
-#define MSDC_IOCON_DDLSEL (0x1 << 3) /* RW */
-#define MSDC_IOCON_DDR50CKD (0x1 << 4) /* RW */
-#define MSDC_IOCON_DSPLSEL (0x1 << 5) /* RW */
-#define MSDC_IOCON_W_DSPL (0x1 << 8) /* RW */
-#define MSDC_IOCON_D0SPL (0x1 << 16) /* RW */
-#define MSDC_IOCON_D1SPL (0x1 << 17) /* RW */
-#define MSDC_IOCON_D2SPL (0x1 << 18) /* RW */
-#define MSDC_IOCON_D3SPL (0x1 << 19) /* RW */
-#define MSDC_IOCON_D4SPL (0x1 << 20) /* RW */
-#define MSDC_IOCON_D5SPL (0x1 << 21) /* RW */
-#define MSDC_IOCON_D6SPL (0x1 << 22) /* RW */
-#define MSDC_IOCON_D7SPL (0x1 << 23) /* RW */
-#define MSDC_IOCON_RISCSZ (0x3 << 24) /* RW */
+#define MSDC_IOCON_SDR104CKS BIT(0) /* RW */
+#define MSDC_IOCON_RSPL BIT(1) /* RW */
+#define MSDC_IOCON_DSPL BIT(2) /* RW */
+#define MSDC_IOCON_DDLSEL BIT(3) /* RW */
+#define MSDC_IOCON_DDR50CKD BIT(4) /* RW */
+#define MSDC_IOCON_DSPLSEL BIT(5) /* RW */
+#define MSDC_IOCON_W_DSPL BIT(8) /* RW */
+#define MSDC_IOCON_D0SPL BIT(16) /* RW */
+#define MSDC_IOCON_D1SPL BIT(17) /* RW */
+#define MSDC_IOCON_D2SPL BIT(18) /* RW */
+#define MSDC_IOCON_D3SPL BIT(19) /* RW */
+#define MSDC_IOCON_D4SPL BIT(20) /* RW */
+#define MSDC_IOCON_D5SPL BIT(21) /* RW */
+#define MSDC_IOCON_D6SPL BIT(22) /* RW */
+#define MSDC_IOCON_D7SPL BIT(23) /* RW */
+#define MSDC_IOCON_RISCSZ GENMASK(25, 24) /* RW */
/* MSDC_PS mask */
-#define MSDC_PS_CDEN (0x1 << 0) /* RW */
-#define MSDC_PS_CDSTS (0x1 << 1) /* R */
-#define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */
-#define MSDC_PS_DAT (0xff << 16) /* R */
-#define MSDC_PS_DATA1 (0x1 << 17) /* R */
-#define MSDC_PS_CMD (0x1 << 24) /* R */
-#define MSDC_PS_WP (0x1 << 31) /* R */
+#define MSDC_PS_CDEN BIT(0) /* RW */
+#define MSDC_PS_CDSTS BIT(1) /* R */
+#define MSDC_PS_CDDEBOUNCE GENMASK(15, 12) /* RW */
+#define MSDC_PS_DAT GENMASK(23, 16) /* R */
+#define MSDC_PS_DATA1 BIT(17) /* R */
+#define MSDC_PS_CMD BIT(24) /* R */
+#define MSDC_PS_WP BIT(31) /* R */
/* MSDC_INT mask */
-#define MSDC_INT_MMCIRQ (0x1 << 0) /* W1C */
-#define MSDC_INT_CDSC (0x1 << 1) /* W1C */
-#define MSDC_INT_ACMDRDY (0x1 << 3) /* W1C */
-#define MSDC_INT_ACMDTMO (0x1 << 4) /* W1C */
-#define MSDC_INT_ACMDCRCERR (0x1 << 5) /* W1C */
-#define MSDC_INT_DMAQ_EMPTY (0x1 << 6) /* W1C */
-#define MSDC_INT_SDIOIRQ (0x1 << 7) /* W1C */
-#define MSDC_INT_CMDRDY (0x1 << 8) /* W1C */
-#define MSDC_INT_CMDTMO (0x1 << 9) /* W1C */
-#define MSDC_INT_RSPCRCERR (0x1 << 10) /* W1C */
-#define MSDC_INT_CSTA (0x1 << 11) /* R */
-#define MSDC_INT_XFER_COMPL (0x1 << 12) /* W1C */
-#define MSDC_INT_DXFER_DONE (0x1 << 13) /* W1C */
-#define MSDC_INT_DATTMO (0x1 << 14) /* W1C */
-#define MSDC_INT_DATCRCERR (0x1 << 15) /* W1C */
-#define MSDC_INT_ACMD19_DONE (0x1 << 16) /* W1C */
-#define MSDC_INT_DMA_BDCSERR (0x1 << 17) /* W1C */
-#define MSDC_INT_DMA_GPDCSERR (0x1 << 18) /* W1C */
-#define MSDC_INT_DMA_PROTECT (0x1 << 19) /* W1C */
-#define MSDC_INT_CMDQ (0x1 << 28) /* W1C */
+#define MSDC_INT_MMCIRQ BIT(0) /* W1C */
+#define MSDC_INT_CDSC BIT(1) /* W1C */
+#define MSDC_INT_ACMDRDY BIT(3) /* W1C */
+#define MSDC_INT_ACMDTMO BIT(4) /* W1C */
+#define MSDC_INT_ACMDCRCERR BIT(5) /* W1C */
+#define MSDC_INT_DMAQ_EMPTY BIT(6) /* W1C */
+#define MSDC_INT_SDIOIRQ BIT(7) /* W1C */
+#define MSDC_INT_CMDRDY BIT(8) /* W1C */
+#define MSDC_INT_CMDTMO BIT(9) /* W1C */
+#define MSDC_INT_RSPCRCERR BIT(10) /* W1C */
+#define MSDC_INT_CSTA BIT(11) /* R */
+#define MSDC_INT_XFER_COMPL BIT(12) /* W1C */
+#define MSDC_INT_DXFER_DONE BIT(13) /* W1C */
+#define MSDC_INT_DATTMO BIT(14) /* W1C */
+#define MSDC_INT_DATCRCERR BIT(15) /* W1C */
+#define MSDC_INT_ACMD19_DONE BIT(16) /* W1C */
+#define MSDC_INT_DMA_BDCSERR BIT(17) /* W1C */
+#define MSDC_INT_DMA_GPDCSERR BIT(18) /* W1C */
+#define MSDC_INT_DMA_PROTECT BIT(19) /* W1C */
+#define MSDC_INT_CMDQ BIT(28) /* W1C */
/* MSDC_INTEN mask */
-#define MSDC_INTEN_MMCIRQ (0x1 << 0) /* RW */
-#define MSDC_INTEN_CDSC (0x1 << 1) /* RW */
-#define MSDC_INTEN_ACMDRDY (0x1 << 3) /* RW */
-#define MSDC_INTEN_ACMDTMO (0x1 << 4) /* RW */
-#define MSDC_INTEN_ACMDCRCERR (0x1 << 5) /* RW */
-#define MSDC_INTEN_DMAQ_EMPTY (0x1 << 6) /* RW */
-#define MSDC_INTEN_SDIOIRQ (0x1 << 7) /* RW */
-#define MSDC_INTEN_CMDRDY (0x1 << 8) /* RW */
-#define MSDC_INTEN_CMDTMO (0x1 << 9) /* RW */
-#define MSDC_INTEN_RSPCRCERR (0x1 << 10) /* RW */
-#define MSDC_INTEN_CSTA (0x1 << 11) /* RW */
-#define MSDC_INTEN_XFER_COMPL (0x1 << 12) /* RW */
-#define MSDC_INTEN_DXFER_DONE (0x1 << 13) /* RW */
-#define MSDC_INTEN_DATTMO (0x1 << 14) /* RW */
-#define MSDC_INTEN_DATCRCERR (0x1 << 15) /* RW */
-#define MSDC_INTEN_ACMD19_DONE (0x1 << 16) /* RW */
-#define MSDC_INTEN_DMA_BDCSERR (0x1 << 17) /* RW */
-#define MSDC_INTEN_DMA_GPDCSERR (0x1 << 18) /* RW */
-#define MSDC_INTEN_DMA_PROTECT (0x1 << 19) /* RW */
+#define MSDC_INTEN_MMCIRQ BIT(0) /* RW */
+#define MSDC_INTEN_CDSC BIT(1) /* RW */
+#define MSDC_INTEN_ACMDRDY BIT(3) /* RW */
+#define MSDC_INTEN_ACMDTMO BIT(4) /* RW */
+#define MSDC_INTEN_ACMDCRCERR BIT(5) /* RW */
+#define MSDC_INTEN_DMAQ_EMPTY BIT(6) /* RW */
+#define MSDC_INTEN_SDIOIRQ BIT(7) /* RW */
+#define MSDC_INTEN_CMDRDY BIT(8) /* RW */
+#define MSDC_INTEN_CMDTMO BIT(9) /* RW */
+#define MSDC_INTEN_RSPCRCERR BIT(10) /* RW */
+#define MSDC_INTEN_CSTA BIT(11) /* RW */
+#define MSDC_INTEN_XFER_COMPL BIT(12) /* RW */
+#define MSDC_INTEN_DXFER_DONE BIT(13) /* RW */
+#define MSDC_INTEN_DATTMO BIT(14) /* RW */
+#define MSDC_INTEN_DATCRCERR BIT(15) /* RW */
+#define MSDC_INTEN_ACMD19_DONE BIT(16) /* RW */
+#define MSDC_INTEN_DMA_BDCSERR BIT(17) /* RW */
+#define MSDC_INTEN_DMA_GPDCSERR BIT(18) /* RW */
+#define MSDC_INTEN_DMA_PROTECT BIT(19) /* RW */
/* MSDC_FIFOCS mask */
-#define MSDC_FIFOCS_RXCNT (0xff << 0) /* R */
-#define MSDC_FIFOCS_TXCNT (0xff << 16) /* R */
-#define MSDC_FIFOCS_CLR (0x1 << 31) /* RW */
+#define MSDC_FIFOCS_RXCNT GENMASK(7, 0) /* R */
+#define MSDC_FIFOCS_TXCNT GENMASK(23, 16) /* R */
+#define MSDC_FIFOCS_CLR BIT(31) /* RW */
/* SDC_CFG mask */
-#define SDC_CFG_SDIOINTWKUP (0x1 << 0) /* RW */
-#define SDC_CFG_INSWKUP (0x1 << 1) /* RW */
-#define SDC_CFG_WRDTOC (0x1fff << 2) /* RW */
-#define SDC_CFG_BUSWIDTH (0x3 << 16) /* RW */
-#define SDC_CFG_SDIO (0x1 << 19) /* RW */
-#define SDC_CFG_SDIOIDE (0x1 << 20) /* RW */
-#define SDC_CFG_INTATGAP (0x1 << 21) /* RW */
-#define SDC_CFG_DTOC (0xff << 24) /* RW */
+#define SDC_CFG_SDIOINTWKUP BIT(0) /* RW */
+#define SDC_CFG_INSWKUP BIT(1) /* RW */
+#define SDC_CFG_WRDTOC GENMASK(14, 2) /* RW */
+#define SDC_CFG_BUSWIDTH GENMASK(17, 16) /* RW */
+#define SDC_CFG_SDIO BIT(19) /* RW */
+#define SDC_CFG_SDIOIDE BIT(20) /* RW */
+#define SDC_CFG_INTATGAP BIT(21) /* RW */
+#define SDC_CFG_DTOC GENMASK(31, 24) /* RW */
/* SDC_STS mask */
-#define SDC_STS_SDCBUSY (0x1 << 0) /* RW */
-#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */
-#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */
+#define SDC_STS_SDCBUSY BIT(0) /* RW */
+#define SDC_STS_CMDBUSY BIT(1) /* RW */
+#define SDC_STS_SWR_COMPL BIT(31) /* RW */
-#define SDC_DAT1_IRQ_TRIGGER (0x1 << 19) /* RW */
/* SDC_ADV_CFG0 mask */
-#define SDC_RX_ENHANCE_EN (0x1 << 20) /* RW */
+#define SDC_DAT1_IRQ_TRIGGER BIT(19) /* RW */
+#define SDC_RX_ENHANCE_EN BIT(20) /* RW */
+#define SDC_NEW_TX_EN BIT(31) /* RW */
+
+/* MSDC_NEW_RX_CFG mask */
+#define MSDC_NEW_RX_PATH_SEL BIT(0) /* RW */
/* DMA_SA_H4BIT mask */
-#define DMA_ADDR_HIGH_4BIT (0xf << 0) /* RW */
+#define DMA_ADDR_HIGH_4BIT GENMASK(3, 0) /* RW */
/* MSDC_DMA_CTRL mask */
-#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */
-#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */
-#define MSDC_DMA_CTRL_RESUME (0x1 << 2) /* W */
-#define MSDC_DMA_CTRL_MODE (0x1 << 8) /* RW */
-#define MSDC_DMA_CTRL_LASTBUF (0x1 << 10) /* RW */
-#define MSDC_DMA_CTRL_BRUSTSZ (0x7 << 12) /* RW */
+#define MSDC_DMA_CTRL_START BIT(0) /* W */
+#define MSDC_DMA_CTRL_STOP BIT(1) /* W */
+#define MSDC_DMA_CTRL_RESUME BIT(2) /* W */
+#define MSDC_DMA_CTRL_MODE BIT(8) /* RW */
+#define MSDC_DMA_CTRL_LASTBUF BIT(10) /* RW */
+#define MSDC_DMA_CTRL_BRUSTSZ GENMASK(14, 12) /* RW */
/* MSDC_DMA_CFG mask */
-#define MSDC_DMA_CFG_STS (0x1 << 0) /* R */
-#define MSDC_DMA_CFG_DECSEN (0x1 << 1) /* RW */
-#define MSDC_DMA_CFG_AHBHPROT2 (0x2 << 8) /* RW */
-#define MSDC_DMA_CFG_ACTIVEEN (0x2 << 12) /* RW */
-#define MSDC_DMA_CFG_CS12B16B (0x1 << 16) /* RW */
+#define MSDC_DMA_CFG_STS BIT(0) /* R */
+#define MSDC_DMA_CFG_DECSEN BIT(1) /* RW */
+#define MSDC_DMA_CFG_AHBHPROT2 BIT(9) /* RW */
+#define MSDC_DMA_CFG_ACTIVEEN BIT(13) /* RW */
+#define MSDC_DMA_CFG_CS12B16B BIT(16) /* RW */
/* MSDC_PATCH_BIT mask */
-#define MSDC_PATCH_BIT_ODDSUPP (0x1 << 1) /* RW */
-#define MSDC_INT_DAT_LATCH_CK_SEL (0x7 << 7)
-#define MSDC_CKGEN_MSDC_DLY_SEL (0x1f << 10)
-#define MSDC_PATCH_BIT_IODSSEL (0x1 << 16) /* RW */
-#define MSDC_PATCH_BIT_IOINTSEL (0x1 << 17) /* RW */
-#define MSDC_PATCH_BIT_BUSYDLY (0xf << 18) /* RW */
-#define MSDC_PATCH_BIT_WDOD (0xf << 22) /* RW */
-#define MSDC_PATCH_BIT_IDRTSEL (0x1 << 26) /* RW */
-#define MSDC_PATCH_BIT_CMDFSEL (0x1 << 27) /* RW */
-#define MSDC_PATCH_BIT_INTDLSEL (0x1 << 28) /* RW */
-#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
-#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
-
-#define MSDC_PATCH_BIT1_CMDTA (0x7 << 3) /* RW */
-#define MSDC_PB1_BUSY_CHECK_SEL (0x1 << 7) /* RW */
-#define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */
-
-#define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */
-#define MSDC_PATCH_BIT2_CFGCRCSTS (0x1 << 28) /* RW */
-#define MSDC_PB2_SUPPORT_64G (0x1 << 1) /* RW */
-#define MSDC_PB2_RESPWAIT (0x3 << 2) /* RW */
-#define MSDC_PB2_RESPSTSENSEL (0x7 << 16) /* RW */
-#define MSDC_PB2_CRCSTSENSEL (0x7 << 29) /* RW */
-
-#define MSDC_PAD_TUNE_DATWRDLY (0x1f << 0) /* RW */
-#define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */
-#define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */
-#define MSDC_PAD_TUNE_CMDRRDLY (0x1f << 22) /* RW */
-#define MSDC_PAD_TUNE_CLKTDLY (0x1f << 27) /* RW */
-#define MSDC_PAD_TUNE_RXDLYSEL (0x1 << 15) /* RW */
-#define MSDC_PAD_TUNE_RD_SEL (0x1 << 13) /* RW */
-#define MSDC_PAD_TUNE_CMD_SEL (0x1 << 21) /* RW */
-
-#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
-#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
-#define PAD_DS_TUNE_DLY3 (0x1f << 12) /* RW */
-
-#define PAD_CMD_TUNE_RX_DLY3 (0x1f << 1) /* RW */
+#define MSDC_PATCH_BIT_ODDSUPP BIT(1) /* RW */
+#define MSDC_PATCH_BIT_DIS_WRMON BIT(2) /* RW */
+#define MSDC_PATCH_BIT_RD_DAT_SEL BIT(3) /* RW */
+#define MSDC_PATCH_BIT_DESCUP_SEL BIT(6) /* RW */
+#define MSDC_INT_DAT_LATCH_CK_SEL GENMASK(9, 7)
+#define MSDC_CKGEN_MSDC_DLY_SEL GENMASK(14, 10)
+#define MSDC_PATCH_BIT_IODSSEL BIT(16) /* RW */
+#define MSDC_PATCH_BIT_IOINTSEL BIT(17) /* RW */
+#define MSDC_PATCH_BIT_BUSYDLY GENMASK(21, 18) /* RW */
+#define MSDC_PATCH_BIT_WDOD GENMASK(25, 22) /* RW */
+#define MSDC_PATCH_BIT_IDRTSEL BIT(26) /* RW */
+#define MSDC_PATCH_BIT_CMDFSEL BIT(27) /* RW */
+#define MSDC_PATCH_BIT_INTDLSEL BIT(28) /* RW */
+#define MSDC_PATCH_BIT_SPCPUSH BIT(29) /* RW */
+#define MSDC_PATCH_BIT_DECRCTMO BIT(30) /* RW */
+
+/* MSDC_PATCH_BIT1 mask */
+#define MSDC_PB1_WRDAT_CRC_TACNTR GENMASK(2, 0) /* RW */
+#define MSDC_PATCH_BIT1_CMDTA GENMASK(5, 3) /* RW */
+#define MSDC_PB1_BUSY_CHECK_SEL BIT(7) /* RW */
+#define MSDC_PATCH_BIT1_STOP_DLY GENMASK(11, 8) /* RW */
+#define MSDC_PB1_DDR_CMD_FIX_SEL BIT(14) /* RW */
+#define MSDC_PB1_SINGLE_BURST BIT(16) /* RW */
+#define MSDC_PB1_RSVD20 GENMASK(18, 17) /* RW */
+#define MSDC_PB1_AUTO_SYNCST_CLR BIT(19) /* RW */
+#define MSDC_PB1_MARK_POP_WATER BIT(20) /* RW */
+#define MSDC_PB1_LP_DCM_EN BIT(21) /* RW */
+#define MSDC_PB1_RSVD3 BIT(22) /* RW */
+#define MSDC_PB1_AHB_GDMA_HCLK BIT(23) /* RW */
+#define MSDC_PB1_MSDC_CLK_ENFEAT GENMASK(31, 24) /* RW */
+
+/* MSDC_PATCH_BIT2 mask */
+#define MSDC_PATCH_BIT2_CFGRESP BIT(15) /* RW */
+#define MSDC_PATCH_BIT2_CFGCRCSTS BIT(28) /* RW */
+#define MSDC_PB2_SUPPORT_64G BIT(1) /* RW */
+#define MSDC_PB2_RESPWAIT GENMASK(3, 2) /* RW */
+#define MSDC_PB2_RESPSTSENSEL GENMASK(18, 16) /* RW */
+#define MSDC_PB2_POP_EN_CNT GENMASK(23, 20) /* RW */
+#define MSDC_PB2_CFGCRCSTSEDGE BIT(25) /* RW */
+#define MSDC_PB2_CRCSTSENSEL GENMASK(31, 29) /* RW */
+
+#define MSDC_PAD_TUNE_DATWRDLY GENMASK(4, 0) /* RW */
+#define MSDC_PAD_TUNE_DATRRDLY GENMASK(12, 8) /* RW */
+#define MSDC_PAD_TUNE_DATRRDLY2 GENMASK(12, 8) /* RW */
+#define MSDC_PAD_TUNE_CMDRDLY GENMASK(20, 16) /* RW */
+#define MSDC_PAD_TUNE_CMDRDLY2 GENMASK(20, 16) /* RW */
+#define MSDC_PAD_TUNE_CMDRRDLY GENMASK(26, 22) /* RW */
+#define MSDC_PAD_TUNE_CLKTDLY GENMASK(31, 27) /* RW */
+#define MSDC_PAD_TUNE_RXDLYSEL BIT(15) /* RW */
+#define MSDC_PAD_TUNE_RD_SEL BIT(13) /* RW */
+#define MSDC_PAD_TUNE_CMD_SEL BIT(21) /* RW */
+#define MSDC_PAD_TUNE_RD2_SEL BIT(13) /* RW */
+#define MSDC_PAD_TUNE_CMD2_SEL BIT(21) /* RW */
+
+#define PAD_DS_TUNE_DLY_SEL BIT(0) /* RW */
+#define PAD_DS_TUNE_DLY2_SEL BIT(1) /* RW */
+#define PAD_DS_TUNE_DLY1 GENMASK(6, 2) /* RW */
+#define PAD_DS_TUNE_DLY2 GENMASK(11, 7) /* RW */
+#define PAD_DS_TUNE_DLY3 GENMASK(16, 12) /* RW */
+
+#define PAD_CMD_TUNE_RX_DLY3 GENMASK(5, 1) /* RW */
/* EMMC51_CFG0 mask */
-#define CMDQ_RDAT_CNT (0x3ff << 12) /* RW */
+#define CMDQ_RDAT_CNT GENMASK(21, 12) /* RW */
-#define EMMC50_CFG_PADCMD_LATCHCK (0x1 << 0) /* RW */
-#define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */
-#define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */
-#define EMMC50_CFG_CMD_RESP_SEL (0x1 << 9) /* RW */
+#define EMMC50_CFG_PADCMD_LATCHCK BIT(0) /* RW */
+#define EMMC50_CFG_CRCSTS_EDGE BIT(3) /* RW */
+#define EMMC50_CFG_CFCSTS_SEL BIT(4) /* RW */
+#define EMMC50_CFG_CMD_RESP_SEL BIT(9) /* RW */
/* EMMC50_CFG1 mask */
-#define EMMC50_CFG1_DS_CFG (0x1 << 28) /* RW */
+#define EMMC50_CFG1_DS_CFG BIT(28) /* RW */
+
+/* EMMC50_CFG2 mask */
+#define EMMC50_CFG2_AXI_SET_LEN GENMASK(27, 24) /* RW */
-#define EMMC50_CFG3_OUTS_WR (0x1f << 0) /* RW */
+#define EMMC50_CFG3_OUTS_WR GENMASK(4, 0) /* RW */
-#define SDC_FIFO_CFG_WRVALIDSEL (0x1 << 24) /* RW */
-#define SDC_FIFO_CFG_RDVALIDSEL (0x1 << 25) /* RW */
+#define SDC_FIFO_CFG_WRVALIDSEL BIT(24) /* RW */
+#define SDC_FIFO_CFG_RDVALIDSEL BIT(25) /* RW */
/* CQHCI_SETTING */
-#define CQHCI_RD_CMD_WND_SEL (0x1 << 14) /* RW */
-#define CQHCI_WR_CMD_WND_SEL (0x1 << 15) /* RW */
+#define CQHCI_RD_CMD_WND_SEL BIT(14) /* RW */
+#define CQHCI_WR_CMD_WND_SEL BIT(15) /* RW */
/* EMMC_TOP_CONTROL mask */
-#define PAD_RXDLY_SEL (0x1 << 0) /* RW */
-#define DELAY_EN (0x1 << 1) /* RW */
-#define PAD_DAT_RD_RXDLY2 (0x1f << 2) /* RW */
-#define PAD_DAT_RD_RXDLY (0x1f << 7) /* RW */
-#define PAD_DAT_RD_RXDLY2_SEL (0x1 << 12) /* RW */
-#define PAD_DAT_RD_RXDLY_SEL (0x1 << 13) /* RW */
-#define DATA_K_VALUE_SEL (0x1 << 14) /* RW */
-#define SDC_RX_ENH_EN (0x1 << 15) /* TW */
+#define PAD_RXDLY_SEL BIT(0) /* RW */
+#define DELAY_EN BIT(1) /* RW */
+#define PAD_DAT_RD_RXDLY2 GENMASK(6, 2) /* RW */
+#define PAD_DAT_RD_RXDLY GENMASK(11, 7) /* RW */
+#define PAD_DAT_RD_RXDLY2_SEL BIT(12) /* RW */
+#define PAD_DAT_RD_RXDLY_SEL BIT(13) /* RW */
+#define DATA_K_VALUE_SEL BIT(14) /* RW */
+#define SDC_RX_ENH_EN		BIT(15)	/* RW */
/* EMMC_TOP_CMD mask */
-#define PAD_CMD_RXDLY2 (0x1f << 0) /* RW */
-#define PAD_CMD_RXDLY (0x1f << 5) /* RW */
-#define PAD_CMD_RD_RXDLY2_SEL (0x1 << 10) /* RW */
-#define PAD_CMD_RD_RXDLY_SEL (0x1 << 11) /* RW */
-#define PAD_CMD_TX_DLY (0x1f << 12) /* RW */
-
-#define REQ_CMD_EIO (0x1 << 0)
-#define REQ_CMD_TMO (0x1 << 1)
-#define REQ_DAT_ERR (0x1 << 2)
-#define REQ_STOP_EIO (0x1 << 3)
-#define REQ_STOP_TMO (0x1 << 4)
-#define REQ_CMD_BUSY (0x1 << 5)
-
-#define MSDC_PREPARE_FLAG (0x1 << 0)
-#define MSDC_ASYNC_FLAG (0x1 << 1)
-#define MSDC_MMAP_FLAG (0x1 << 2)
+#define PAD_CMD_RXDLY2 GENMASK(4, 0) /* RW */
+#define PAD_CMD_RXDLY GENMASK(9, 5) /* RW */
+#define PAD_CMD_RD_RXDLY2_SEL BIT(10) /* RW */
+#define PAD_CMD_RD_RXDLY_SEL BIT(11) /* RW */
+#define PAD_CMD_TX_DLY GENMASK(16, 12) /* RW */
+
+/* EMMC50_PAD_DS_TUNE mask */
+#define PAD_DS_DLY_SEL BIT(16) /* RW */
+#define PAD_DS_DLY2_SEL BIT(15) /* RW */
+#define PAD_DS_DLY1 GENMASK(14, 10) /* RW */
+#define PAD_DS_DLY3 GENMASK(4, 0) /* RW */
+
+/* LOOP_TEST_CONTROL mask */
+#define TEST_LOOP_DSCLK_MUX_SEL BIT(0) /* RW */
+#define TEST_LOOP_LATCH_MUX_SEL BIT(1) /* RW */
+#define LOOP_EN_SEL_CLK BIT(20) /* RW */
+#define TEST_HS400_CMD_LOOP_MUX_SEL BIT(31) /* RW */
+
+#define REQ_CMD_EIO BIT(0)
+#define REQ_CMD_TMO BIT(1)
+#define REQ_DAT_ERR BIT(2)
+#define REQ_STOP_EIO BIT(3)
+#define REQ_STOP_TMO BIT(4)
+#define REQ_CMD_BUSY BIT(5)
+
+#define MSDC_PREPARE_FLAG BIT(0)
+#define MSDC_ASYNC_FLAG BIT(1)
+#define MSDC_MMAP_FLAG BIT(2)
#define MTK_MMC_AUTOSUSPEND_DELAY 50
#define CMD_TIMEOUT (HZ/10 * 5) /* 100ms x5 */
@@ -318,23 +365,25 @@
#define DEFAULT_DEBOUNCE (8) /* 8 cycles CD debounce */
-#define PAD_DELAY_MAX 32 /* PAD delay cells */
+#define TUNING_REG2_FIXED_OFFEST 4
+#define PAD_DELAY_HALF 32 /* PAD delay cells */
+#define PAD_DELAY_FULL 64
/*--------------------------------------------------------------------------*/
/* Descriptor Structure */
/*--------------------------------------------------------------------------*/
struct mt_gpdma_desc {
u32 gpd_info;
-#define GPDMA_DESC_HWO (0x1 << 0)
-#define GPDMA_DESC_BDP (0x1 << 1)
-#define GPDMA_DESC_CHECKSUM (0xff << 8) /* bit8 ~ bit15 */
-#define GPDMA_DESC_INT (0x1 << 16)
-#define GPDMA_DESC_NEXT_H4 (0xf << 24)
-#define GPDMA_DESC_PTR_H4 (0xf << 28)
+#define GPDMA_DESC_HWO BIT(0)
+#define GPDMA_DESC_BDP BIT(1)
+#define GPDMA_DESC_CHECKSUM GENMASK(15, 8)
+#define GPDMA_DESC_INT BIT(16)
+#define GPDMA_DESC_NEXT_H4 GENMASK(27, 24)
+#define GPDMA_DESC_PTR_H4 GENMASK(31, 28)
u32 next;
u32 ptr;
u32 gpd_data_len;
-#define GPDMA_DESC_BUFLEN (0xffff) /* bit0 ~ bit15 */
-#define GPDMA_DESC_EXTLEN (0xff << 16) /* bit16 ~ bit23 */
+#define GPDMA_DESC_BUFLEN GENMASK(15, 0)
+#define GPDMA_DESC_EXTLEN GENMASK(23, 16)
u32 arg;
u32 blknum;
u32 cmd;
@@ -342,17 +391,17 @@ struct mt_gpdma_desc {
struct mt_bdma_desc {
u32 bd_info;
-#define BDMA_DESC_EOL (0x1 << 0)
-#define BDMA_DESC_CHECKSUM (0xff << 8) /* bit8 ~ bit15 */
-#define BDMA_DESC_BLKPAD (0x1 << 17)
-#define BDMA_DESC_DWPAD (0x1 << 18)
-#define BDMA_DESC_NEXT_H4 (0xf << 24)
-#define BDMA_DESC_PTR_H4 (0xf << 28)
+#define BDMA_DESC_EOL BIT(0)
+#define BDMA_DESC_CHECKSUM GENMASK(15, 8)
+#define BDMA_DESC_BLKPAD BIT(17)
+#define BDMA_DESC_DWPAD BIT(18)
+#define BDMA_DESC_NEXT_H4 GENMASK(27, 24)
+#define BDMA_DESC_PTR_H4 GENMASK(31, 28)
u32 next;
u32 ptr;
u32 bd_data_len;
-#define BDMA_DESC_BUFLEN (0xffff) /* bit0 ~ bit15 */
-#define BDMA_DESC_BUFLEN_EXT (0xffffff) /* bit0 ~ bit23 */
+#define BDMA_DESC_BUFLEN GENMASK(15, 0)
+#define BDMA_DESC_BUFLEN_EXT GENMASK(23, 0)
};
struct msdc_dma {
@@ -379,20 +428,26 @@ struct msdc_save_para {
u32 emmc_top_control;
u32 emmc_top_cmd;
u32 emmc50_pad_ds_tune;
+ u32 loop_test_control;
};
struct mtk_mmc_compatible {
u8 clk_div_bits;
bool recheck_sdio_irq;
bool hs400_tune; /* only used for MT8173 */
+ bool needs_top_base;
u32 pad_tune_reg;
bool async_fifo;
bool data_tune;
bool busy_check;
bool stop_clk_fix;
+ u8 stop_dly_sel;
+ u8 pop_en_cnt;
bool enhance_rx;
bool support_64g;
bool use_internal_cd;
+ bool support_new_tx;
+ bool support_new_rx;
};
struct msdc_tune_para {
@@ -432,8 +487,10 @@ struct msdc_host {
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default;
struct pinctrl_state *pins_uhs;
+ struct pinctrl_state *pins_eint;
struct delayed_work req_timeout;
int irq; /* host interrupt */
+ int eint_irq; /* interrupt from sdio device for waking up system */
struct reset_control *reset;
struct clk *src_clk; /* msdc source clock */
@@ -441,6 +498,7 @@ struct msdc_host {
struct clk *bus_clk; /* bus clock which used to access register */
struct clk *src_clk_cg; /* msdc source clock control gate */
struct clk *sys_clk_cg; /* msdc subsys clock control gate */
+ struct clk *crypto_clk; /* msdc crypto clock control gate */
struct clk_bulk_data bulk_clks[MSDC_NR_CLOCKS];
u32 mclk; /* mmc subsystem clock frequency */
u32 src_clk_freq; /* source clock frequency */
@@ -448,46 +506,38 @@ struct msdc_host {
bool vqmmc_enabled;
u32 latch_ck;
u32 hs400_ds_delay;
+ u32 hs400_ds_dly3;
u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
+ u32 tuning_step;
bool hs400_cmd_resp_sel_rising;
/* cmd response sample selection for HS400 */
bool hs400_mode; /* current eMMC will run at hs400 mode */
+ bool hs400_tuning; /* hs400 mode online tuning */
bool internal_cd; /* Use internal card-detect logic */
bool cqhci; /* support eMMC hw cmdq */
+ bool hsq_en; /* Host Software Queue is enabled */
struct msdc_save_para save_para; /* used when gate HCLK */
struct msdc_tune_para def_tune_para; /* default tune setting */
struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
struct cqhci_host *cq_host;
+ u32 cq_ssc1_time;
};
-static const struct mtk_mmc_compatible mt8135_compat = {
- .clk_div_bits = 8,
+static const struct mtk_mmc_compatible mt2701_compat = {
+ .clk_div_bits = 12,
.recheck_sdio_irq = true,
.hs400_tune = false,
- .pad_tune_reg = MSDC_PAD_TUNE,
- .async_fifo = false,
- .data_tune = false,
- .busy_check = false,
- .stop_clk_fix = false,
- .enhance_rx = false,
- .support_64g = false,
-};
-
-static const struct mtk_mmc_compatible mt8173_compat = {
- .clk_div_bits = 8,
- .recheck_sdio_irq = true,
- .hs400_tune = true,
- .pad_tune_reg = MSDC_PAD_TUNE,
- .async_fifo = false,
- .data_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
.busy_check = false,
.stop_clk_fix = false,
.enhance_rx = false,
.support_64g = false,
};
-static const struct mtk_mmc_compatible mt8183_compat = {
+static const struct mtk_mmc_compatible mt2712_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = false,
.hs400_tune = false,
@@ -496,34 +546,49 @@ static const struct mtk_mmc_compatible mt8183_compat = {
.data_tune = true,
.busy_check = true,
.stop_clk_fix = true,
+ .stop_dly_sel = 3,
.enhance_rx = true,
.support_64g = true,
};
-static const struct mtk_mmc_compatible mt2701_compat = {
+static const struct mtk_mmc_compatible mt6779_compat = {
.clk_div_bits = 12,
- .recheck_sdio_irq = true,
+ .recheck_sdio_irq = false,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
+ .busy_check = true,
+ .stop_clk_fix = true,
+ .stop_dly_sel = 3,
+ .enhance_rx = true,
+ .support_64g = true,
+};
+
+static const struct mtk_mmc_compatible mt6795_compat = {
+ .clk_div_bits = 8,
+ .recheck_sdio_irq = false,
+ .hs400_tune = true,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
.busy_check = false,
.stop_clk_fix = false,
.enhance_rx = false,
.support_64g = false,
};
-static const struct mtk_mmc_compatible mt2712_compat = {
- .clk_div_bits = 12,
- .recheck_sdio_irq = false,
+static const struct mtk_mmc_compatible mt7620_compat = {
+ .clk_div_bits = 8,
+ .recheck_sdio_irq = true,
.hs400_tune = false,
- .pad_tune_reg = MSDC_PAD_TUNE0,
- .async_fifo = true,
- .data_tune = true,
- .busy_check = true,
- .stop_clk_fix = true,
- .enhance_rx = true,
- .support_64g = true,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+ .use_internal_cd = true,
};
static const struct mtk_mmc_compatible mt7622_compat = {
@@ -535,22 +600,27 @@ static const struct mtk_mmc_compatible mt7622_compat = {
.data_tune = true,
.busy_check = true,
.stop_clk_fix = true,
+ .stop_dly_sel = 3,
.enhance_rx = true,
.support_64g = false,
};
-static const struct mtk_mmc_compatible mt8516_compat = {
+static const struct mtk_mmc_compatible mt7986_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = true,
.hs400_tune = false,
+ .needs_top_base = true,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
.busy_check = true,
.stop_clk_fix = true,
+ .stop_dly_sel = 3,
+ .enhance_rx = true,
+ .support_64g = true,
};
-static const struct mtk_mmc_compatible mt7620_compat = {
+static const struct mtk_mmc_compatible mt8135_compat = {
.clk_div_bits = 8,
.recheck_sdio_irq = true,
.hs400_tune = false,
@@ -560,32 +630,82 @@ static const struct mtk_mmc_compatible mt7620_compat = {
.busy_check = false,
.stop_clk_fix = false,
.enhance_rx = false,
- .use_internal_cd = true,
+ .support_64g = false,
};
-static const struct mtk_mmc_compatible mt6779_compat = {
+static const struct mtk_mmc_compatible mt8173_compat = {
+ .clk_div_bits = 8,
+ .recheck_sdio_irq = true,
+ .hs400_tune = true,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+ .support_64g = false,
+};
+
+static const struct mtk_mmc_compatible mt8183_compat = {
+ .clk_div_bits = 12,
+ .recheck_sdio_irq = false,
+ .hs400_tune = false,
+ .needs_top_base = true,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = true,
+ .stop_clk_fix = true,
+ .stop_dly_sel = 3,
+ .enhance_rx = true,
+ .support_64g = true,
+};
+
+static const struct mtk_mmc_compatible mt8516_compat = {
+ .clk_div_bits = 12,
+ .recheck_sdio_irq = true,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = true,
+ .stop_clk_fix = true,
+ .stop_dly_sel = 3,
+};
+
+static const struct mtk_mmc_compatible mt8196_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = false,
.hs400_tune = false,
+ .needs_top_base = true,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
.busy_check = true,
.stop_clk_fix = true,
+ .stop_dly_sel = 1,
+ .pop_en_cnt = 2,
.enhance_rx = true,
.support_64g = true,
+ .support_new_tx = true,
+ .support_new_rx = true,
};
static const struct of_device_id msdc_of_ids[] = {
- { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
- { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
- { .compatible = "mediatek,mt8183-mmc", .data = &mt8183_compat},
{ .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
{ .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
+ { .compatible = "mediatek,mt6779-mmc", .data = &mt6779_compat},
+ { .compatible = "mediatek,mt6795-mmc", .data = &mt6795_compat},
+ { .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat},
{ .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat},
+ { .compatible = "mediatek,mt7986-mmc", .data = &mt7986_compat},
+ { .compatible = "mediatek,mt7988-mmc", .data = &mt7986_compat},
+ { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
+ { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
+ { .compatible = "mediatek,mt8183-mmc", .data = &mt8183_compat},
+ { .compatible = "mediatek,mt8196-mmc", .data = &mt8196_compat},
{ .compatible = "mediatek,mt8516-mmc", .data = &mt8516_compat},
- { .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat},
- { .compatible = "mediatek,mt6779-mmc", .data = &mt6779_compat},
+
{}
};
MODULE_DEVICE_TABLE(of, msdc_of_ids);
@@ -627,12 +747,11 @@ static void msdc_reset_hw(struct msdc_host *host)
u32 val;
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
- while (readl(host->base + MSDC_CFG) & MSDC_CFG_RST)
- cpu_relax();
+ readl_poll_timeout_atomic(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);
sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
- while (readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_CLR)
- cpu_relax();
+ readl_poll_timeout_atomic(host->base + MSDC_FIFOCS, val,
+ !(val & MSDC_FIFOCS_CLR), 0, 0);
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
@@ -708,7 +827,7 @@ static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
else
bd[j].bd_info &= ~BDMA_DESC_EOL;
- /* checksume need to clear first */
+ /* checksum need to clear first */
bd[j].bd_info &= ~BDMA_DESC_CHECKSUM;
bd[j].bd_info |= msdc_dma_calcs((u8 *)(&bd[j]), 16) << 8;
}
@@ -716,7 +835,7 @@ static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
sdr_set_field(host->base + MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1);
dma_ctrl = readl_relaxed(host->base + MSDC_DMA_CTRL);
dma_ctrl &= ~(MSDC_DMA_CTRL_BRUSTSZ | MSDC_DMA_CTRL_MODE);
- dma_ctrl |= (MSDC_BURST_64B << 12 | 1 << 8);
+ dma_ctrl |= (MSDC_BURST_64B << 12 | BIT(8));
writel_relaxed(dma_ctrl, host->base + MSDC_DMA_CTRL);
if (host->dev_comp->support_64g)
sdr_set_field(host->base + DMA_SA_H4BIT, DMA_ADDR_HIGH_4BIT,
@@ -727,12 +846,18 @@ static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
static void msdc_prepare_data(struct msdc_host *host, struct mmc_data *data)
{
if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
- data->host_cookie |= MSDC_PREPARE_FLAG;
data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
+ if (data->sg_count)
+ data->host_cookie |= MSDC_PREPARE_FLAG;
}
}
+static bool msdc_data_prepared(struct mmc_data *data)
+{
+ return data->host_cookie & MSDC_PREPARE_FLAG;
+}
+
static void msdc_unprepare_data(struct msdc_host *host, struct mmc_data *data)
{
if (data->host_cookie & MSDC_ASYNC_FLAG)
@@ -748,19 +873,18 @@ static void msdc_unprepare_data(struct msdc_host *host, struct mmc_data *data)
static u64 msdc_timeout_cal(struct msdc_host *host, u64 ns, u64 clks)
{
struct mmc_host *mmc = mmc_from_priv(host);
- u64 timeout, clk_ns;
- u32 mode = 0;
+ u64 timeout;
+ u32 clk_ns, mode = 0;
if (mmc->actual_clock == 0) {
timeout = 0;
} else {
- clk_ns = 1000000000ULL;
- do_div(clk_ns, mmc->actual_clock);
+ clk_ns = 1000000000U / mmc->actual_clock;
timeout = ns + clk_ns - 1;
do_div(timeout, clk_ns);
timeout += clks;
/* in 1048576 sclk cycle unit */
- timeout = DIV_ROUND_UP(timeout, (0x1 << 20));
+ timeout = DIV_ROUND_UP(timeout, BIT(20));
if (host->dev_comp->clk_div_bits == 8)
sdr_get_field(host->base + MSDC_CFG,
MSDC_CFG_CKMOD, &mode);
@@ -784,7 +908,7 @@ static void msdc_set_timeout(struct msdc_host *host, u64 ns, u64 clks)
timeout = msdc_timeout_cal(host, ns, clks);
sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC,
- (u32)(timeout > 255 ? 255 : timeout));
+ min_t(u32, timeout, 255));
}
static void msdc_set_busy_timeout(struct msdc_host *host, u64 ns, u64 clks)
@@ -793,34 +917,71 @@ static void msdc_set_busy_timeout(struct msdc_host *host, u64 ns, u64 clks)
timeout = msdc_timeout_cal(host, ns, clks);
sdr_set_field(host->base + SDC_CFG, SDC_CFG_WRDTOC,
- (u32)(timeout > 8191 ? 8191 : timeout));
+ min_t(u32, timeout, 8191));
}
static void msdc_gate_clock(struct msdc_host *host)
{
clk_bulk_disable_unprepare(MSDC_NR_CLOCKS, host->bulk_clks);
+ clk_disable_unprepare(host->crypto_clk);
clk_disable_unprepare(host->src_clk_cg);
clk_disable_unprepare(host->src_clk);
clk_disable_unprepare(host->bus_clk);
clk_disable_unprepare(host->h_clk);
}
-static void msdc_ungate_clock(struct msdc_host *host)
+static int msdc_ungate_clock(struct msdc_host *host)
{
+ u32 val;
int ret;
clk_prepare_enable(host->h_clk);
clk_prepare_enable(host->bus_clk);
clk_prepare_enable(host->src_clk);
clk_prepare_enable(host->src_clk_cg);
+ clk_prepare_enable(host->crypto_clk);
ret = clk_bulk_prepare_enable(MSDC_NR_CLOCKS, host->bulk_clks);
if (ret) {
dev_err(host->dev, "Cannot enable pclk/axi/ahb clock gates\n");
- return;
+ return ret;
}
- while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
- cpu_relax();
+ return readl_poll_timeout(host->base + MSDC_CFG, val,
+ (val & MSDC_CFG_CKSTB), 1, 20000);
+}
+
+static void msdc_new_tx_setting(struct msdc_host *host)
+{
+ u32 val;
+
+ if (!host->top_base)
+ return;
+
+ val = readl(host->top_base + LOOP_TEST_CONTROL);
+ val |= TEST_LOOP_DSCLK_MUX_SEL;
+ val |= TEST_LOOP_LATCH_MUX_SEL;
+ val &= ~TEST_HS400_CMD_LOOP_MUX_SEL;
+
+ switch (host->timing) {
+ case MMC_TIMING_LEGACY:
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR12:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ val &= ~LOOP_EN_SEL_CLK;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_MMC_HS400:
+ val |= LOOP_EN_SEL_CLK;
+ break;
+ default:
+ break;
+ }
+ writel(val, host->top_base + LOOP_TEST_CONTROL);
}
static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
@@ -831,6 +992,8 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
u32 div;
u32 sclk;
u32 tune_reg = host->dev_comp->pad_tune_reg;
+ u32 val;
+ bool timing_changed;
if (!hz) {
dev_dbg(host->dev, "set mclk to 0\n");
@@ -840,6 +1003,11 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
return;
}
+ timing_changed = host->timing != timing;
+
flags = readl(host->base + MSDC_INTEN);
sdr_clr_bits(host->base + MSDC_INTEN, flags);
if (host->dev_comp->clk_div_bits == 8)
@@ -890,14 +1058,8 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
}
}
sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
- /*
- * As src_clk/HCLK use the same bit to gate/ungate,
- * So if want to only gate src_clk, need gate its parent(mux).
- */
- if (host->src_clk_cg)
- clk_disable_unprepare(host->src_clk_cg);
- else
- clk_disable_unprepare(clk_get_parent(host->src_clk));
+
+ clk_disable_unprepare(host->src_clk_cg);
if (host->dev_comp->clk_div_bits == 8)
sdr_set_field(host->base + MSDC_CFG,
MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
@@ -906,13 +1068,9 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
sdr_set_field(host->base + MSDC_CFG,
MSDC_CFG_CKMOD_EXTRA | MSDC_CFG_CKDIV_EXTRA,
(mode << 12) | div);
- if (host->src_clk_cg)
- clk_prepare_enable(host->src_clk_cg);
- else
- clk_prepare_enable(clk_get_parent(host->src_clk));
- while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
- cpu_relax();
+ clk_prepare_enable(host->src_clk_cg);
+ readl_poll_timeout(host->base + MSDC_CFG, val, (val & MSDC_CFG_CKSTB), 0, 0);
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
mmc->actual_clock = sclk;
host->mclk = hz;
@@ -956,21 +1114,25 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs400_cmd_int_delay);
+ if (host->dev_comp->support_new_tx && timing_changed)
+ msdc_new_tx_setting(host);
+
dev_dbg(host->dev, "sclk: %d, timing: %d\n", mmc->actual_clock,
timing);
}
static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
- struct mmc_request *mrq, struct mmc_command *cmd)
+ struct mmc_command *cmd)
{
u32 resp;
switch (mmc_resp_type(cmd)) {
- /* Actually, R1, R5, R6, R7 are the same */
+ /* Actually, R1, R5, R6, R7 are the same */
case MMC_RSP_R1:
resp = 0x1;
break;
case MMC_RSP_R1B:
+ case MMC_RSP_R1B_NO_CRC:
resp = 0x7;
break;
case MMC_RSP_R2:
@@ -997,22 +1159,22 @@ static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
* stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
*/
u32 opcode = cmd->opcode;
- u32 resp = msdc_cmd_find_resp(host, mrq, cmd);
+ u32 resp = msdc_cmd_find_resp(host, cmd);
u32 rawcmd = (opcode & 0x3f) | ((resp & 0x7) << 7);
host->cmd_rsp = resp;
if ((opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int) -1) ||
opcode == MMC_STOP_TRANSMISSION)
- rawcmd |= (0x1 << 14);
+ rawcmd |= BIT(14);
else if (opcode == SD_SWITCH_VOLTAGE)
- rawcmd |= (0x1 << 30);
+ rawcmd |= BIT(30);
else if (opcode == SD_APP_SEND_SCR ||
opcode == SD_APP_SEND_NUM_WR_BLKS ||
(opcode == SD_SWITCH && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
(opcode == SD_APP_SD_STATUS && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
(opcode == MMC_SEND_EXT_CSD && mmc_cmd_type(cmd) == MMC_CMD_ADTC))
- rawcmd |= (0x1 << 11);
+ rawcmd |= BIT(11);
if (cmd->data) {
struct mmc_data *data = cmd->data;
@@ -1020,16 +1182,16 @@ static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
if (mmc_op_multi(opcode)) {
if (mmc_card_mmc(mmc->card) && mrq->sbc &&
!(mrq->sbc->arg & 0xFFFF0000))
- rawcmd |= 0x2 << 28; /* AutoCMD23 */
+ rawcmd |= BIT(29); /* AutoCMD23 */
}
rawcmd |= ((data->blksz & 0xFFF) << 16);
if (data->flags & MMC_DATA_WRITE)
- rawcmd |= (0x1 << 13);
+ rawcmd |= BIT(13);
if (data->blocks > 1)
- rawcmd |= (0x2 << 11);
+ rawcmd |= BIT(12);
else
- rawcmd |= (0x1 << 11);
+ rawcmd |= BIT(11);
/* Always use dma mode */
sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_PIO);
@@ -1043,8 +1205,8 @@ static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
return rawcmd;
}
-static void msdc_start_data(struct msdc_host *host, struct mmc_request *mrq,
- struct mmc_command *cmd, struct mmc_data *data)
+static void msdc_start_data(struct msdc_host *host, struct mmc_command *cmd,
+ struct mmc_data *data)
{
bool read;
@@ -1052,7 +1214,7 @@ static void msdc_start_data(struct msdc_host *host, struct mmc_request *mrq,
host->data = data;
read = data->flags & MMC_DATA_READ;
- mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
+ mod_delayed_work(system_percpu_wq, &host->req_timeout, DAT_TIMEOUT);
msdc_dma_setup(host, &host->dma, data);
sdr_set_bits(host->base + MSDC_INTEN, data_ints_mask);
sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
@@ -1112,17 +1274,20 @@ static void msdc_recheck_sdio_irq(struct msdc_host *host)
}
}
-static void msdc_track_cmd_data(struct msdc_host *host,
- struct mmc_command *cmd, struct mmc_data *data)
+static void msdc_track_cmd_data(struct msdc_host *host, struct mmc_command *cmd)
{
- if (host->error)
- dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n",
- __func__, cmd->opcode, cmd->arg, host->error);
+ if (host->error &&
+ ((!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning) ||
+ cmd->error == -ETIMEDOUT))
+ dev_warn(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n",
+ __func__, cmd->opcode, cmd->arg, host->error);
}
static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
{
+ struct mmc_host *mmc = mmc_from_priv(host);
unsigned long flags;
+ bool hsq_req_done;
/*
* No need check the return value of cancel_delayed_work, as only ONE
@@ -1130,16 +1295,37 @@ static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
*/
cancel_delayed_work(&host->req_timeout);
+ /*
+ * If the request was handled from Host Software Queue, there's almost
+ * nothing to do here, and we also don't need to reset mrq as any race
+ * condition would not have any room to happen, since HSQ stores the
+ * "scheduled" mrqs in an internal array of mrq slots anyway.
+ * However, if the controller experienced an error, we still want to
+ * reset it as soon as possible.
+ *
+ * Note that non-HSQ requests will still be happening at times, even
+ * though it is enabled, and that's what is going to reset host->mrq.
+ * Also, msdc_unprepare_data() is going to be called by HSQ when needed
+ * as HSQ request finalization will eventually call the .post_req()
+ * callback of this driver which, in turn, unprepares the data.
+ */
+ hsq_req_done = host->hsq_en ? mmc_hsq_finalize_request(mmc, mrq) : false;
+ if (hsq_req_done) {
+ if (host->error)
+ msdc_reset_hw(host);
+ return;
+ }
+
spin_lock_irqsave(&host->lock, flags);
host->mrq = NULL;
spin_unlock_irqrestore(&host->lock, flags);
- msdc_track_cmd_data(host, mrq->cmd, mrq->data);
+ msdc_track_cmd_data(host, mrq->cmd);
if (mrq->data)
msdc_unprepare_data(host, mrq->data);
if (host->error)
msdc_reset_hw(host);
- mmc_request_done(mmc_from_priv(host), mrq);
+ mmc_request_done(mmc, mrq);
if (host->dev_comp->recheck_sdio_irq)
msdc_recheck_sdio_irq(host);
}
@@ -1188,16 +1374,16 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
}
if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
- if (events & MSDC_INT_CMDTMO ||
- (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
- cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200))
+ if ((events & MSDC_INT_CMDTMO && !host->hs400_tuning) ||
+ (!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning))
/*
* should not clear fifo/interrupt as the tune data
- * may have alreay come when cmd19/cmd21 gets response
+ * may have already come when cmd19/cmd21 gets response
* CRC error.
*/
msdc_reset_hw(host);
- if (events & MSDC_INT_RSPCRCERR) {
+ if (events & MSDC_INT_RSPCRCERR &&
+ mmc_resp_type(cmd) != MMC_RSP_R1B_NO_CRC) {
cmd->error = -EILSEQ;
host->error |= REQ_CMD_EIO;
} else if (events & MSDC_INT_CMDTMO) {
@@ -1222,13 +1408,13 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
static inline bool msdc_cmd_is_ready(struct msdc_host *host,
struct mmc_request *mrq, struct mmc_command *cmd)
{
- /* The max busy time we can endure is 20ms */
- unsigned long tmo = jiffies + msecs_to_jiffies(20);
+ u32 val;
+ int ret;
- while ((readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) &&
- time_before(jiffies, tmo))
- cpu_relax();
- if (readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) {
+ /* The max busy time we can endure is 20ms */
+ ret = readl_poll_timeout_atomic(host->base + SDC_STS, val,
+ !(val & SDC_STS_CMDBUSY), 1, 20000);
+ if (ret) {
dev_err(host->dev, "CMD bus busy detected\n");
host->error |= REQ_CMD_BUSY;
msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
@@ -1236,12 +1422,10 @@ static inline bool msdc_cmd_is_ready(struct msdc_host *host,
}
if (mmc_resp_type(cmd) == MMC_RSP_R1B || cmd->data) {
- tmo = jiffies + msecs_to_jiffies(20);
/* R1B or with data, should check SDCBUSY */
- while ((readl(host->base + SDC_STS) & SDC_STS_SDCBUSY) &&
- time_before(jiffies, tmo))
- cpu_relax();
- if (readl(host->base + SDC_STS) & SDC_STS_SDCBUSY) {
+ ret = readl_poll_timeout_atomic(host->base + SDC_STS, val,
+ !(val & SDC_STS_SDCBUSY), 1, 20000);
+ if (ret) {
dev_err(host->dev, "Controller busy detected\n");
host->error |= REQ_CMD_BUSY;
msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
@@ -1260,7 +1444,7 @@ static void msdc_start_command(struct msdc_host *host,
WARN_ON(host->cmd);
host->cmd = cmd;
- mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
+ mod_delayed_work(system_percpu_wq, &host->req_timeout, DAT_TIMEOUT);
if (!msdc_cmd_is_ready(host, mrq, cmd))
return;
@@ -1284,10 +1468,9 @@ static void msdc_start_command(struct msdc_host *host,
static void msdc_cmd_next(struct msdc_host *host,
struct mmc_request *mrq, struct mmc_command *cmd)
{
- if ((cmd->error &&
- !(cmd->error == -EILSEQ &&
- (cmd->opcode == MMC_SEND_TUNING_BLOCK ||
- cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))) ||
+ if ((cmd->error && !host->hs400_tuning &&
+ !(cmd->error == -EILSEQ &&
+ mmc_op_tuning(cmd->opcode))) ||
(mrq->sbc && mrq->sbc->error))
msdc_request_done(host, mrq);
else if (cmd == mrq->sbc)
@@ -1295,7 +1478,7 @@ static void msdc_cmd_next(struct msdc_host *host,
else if (!cmd->data)
msdc_request_done(host, mrq);
else
- msdc_start_data(host, mrq, cmd, cmd->data);
+ msdc_start_data(host, cmd, cmd->data);
}
static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -1303,11 +1486,22 @@ static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct msdc_host *host = mmc_priv(mmc);
host->error = 0;
- WARN_ON(host->mrq);
+ WARN_ON(!host->hsq_en && host->mrq);
host->mrq = mrq;
- if (mrq->data)
+ if (mrq->data) {
msdc_prepare_data(host, mrq->data);
+ if (!msdc_data_prepared(mrq->data)) {
+ host->mrq = NULL;
+ /*
+ * Failed to prepare DMA area, fail fast before
+ * starting any commands.
+ */
+ mrq->cmd->error = -ENOSPC;
+ mmc_request_done(mmc_from_priv(host), mrq);
+ return;
+ }
+ }
/* if SBC is required, we have HW option and SW option.
* if HW option is enabled, and SBC does not have "special" flags,
@@ -1356,7 +1550,7 @@ static void msdc_data_xfer_next(struct msdc_host *host, struct mmc_request *mrq)
msdc_request_done(host, mrq);
}
-static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
+static void msdc_data_xfer_done(struct msdc_host *host, u32 events,
struct mmc_request *mrq, struct mmc_data *data)
{
struct mmc_command *stop;
@@ -1366,6 +1560,8 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
(MSDC_INT_XFER_COMPL | MSDC_INT_DATCRCERR | MSDC_INT_DATTMO
| MSDC_INT_DMA_BDCSERR | MSDC_INT_DMA_GPDCSERR
| MSDC_INT_DMA_PROTECT);
+ u32 val;
+ int ret;
spin_lock_irqsave(&host->lock, flags);
done = !host->data;
@@ -1374,7 +1570,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
spin_unlock_irqrestore(&host->lock, flags);
if (done)
- return true;
+ return;
stop = data->stop;
if (check_data || (stop && stop->error)) {
@@ -1382,8 +1578,17 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
readl(host->base + MSDC_DMA_CFG));
sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP,
1);
- while (readl(host->base + MSDC_DMA_CFG) & MSDC_DMA_CFG_STS)
- cpu_relax();
+
+ ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CTRL, val,
+ !(val & MSDC_DMA_CTRL_STOP), 1, 20000);
+ if (ret)
+ dev_dbg(host->dev, "DMA stop timed out\n");
+
+ ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CFG, val,
+ !(val & MSDC_DMA_CFG_STS), 1, 20000);
+ if (ret)
+ dev_dbg(host->dev, "DMA inactive timed out\n");
+
sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask);
dev_dbg(host->dev, "DMA stop\n");
@@ -1407,9 +1612,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
}
msdc_data_xfer_next(host, mrq);
- done = true;
}
- return done;
}
static void msdc_set_buswidth(struct msdc_host *host, u32 width)
@@ -1512,17 +1715,46 @@ static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb)
static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
- unsigned long flags;
struct msdc_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int ret;
spin_lock_irqsave(&host->lock, flags);
__msdc_enable_sdio_irq(host, enb);
spin_unlock_irqrestore(&host->lock, flags);
- if (enb)
- pm_runtime_get_noresume(host->dev);
- else
- pm_runtime_put_noidle(host->dev);
+ if (mmc_card_enable_async_irq(mmc->card) && host->pins_eint) {
+ if (enb) {
+ /*
+ * In dev_pm_set_dedicated_wake_irq_reverse(), eint pin will be set to
+ * GPIO mode. We need to restore it to SDIO DAT1 mode after that.
+ * Since the current pinstate is pins_uhs, to ensure the pinctrl select
+ * takes effect, we change the pinstate to pins_eint first.
+ */
+ pinctrl_select_state(host->pinctrl, host->pins_eint);
+ ret = dev_pm_set_dedicated_wake_irq_reverse(host->dev, host->eint_irq);
+
+ if (ret) {
+ dev_err(host->dev, "Failed to register SDIO wakeup irq!\n");
+ host->pins_eint = NULL;
+ pm_runtime_get_noresume(host->dev);
+ } else {
+ dev_dbg(host->dev, "SDIO eint irq: %d!\n", host->eint_irq);
+ }
+
+ pinctrl_select_state(host->pinctrl, host->pins_uhs);
+ } else {
+ dev_pm_clear_wake_irq(host->dev);
+ }
+ } else {
+ if (enb) {
+ /* Ensure host->pins_eint is NULL */
+ host->pins_eint = NULL;
+ pm_runtime_get_noresume(host->dev);
+ } else {
+ pm_runtime_put_noidle(host->dev);
+ }
+ }
}
static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts)
@@ -1547,7 +1779,7 @@ static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts)
}
if (cmd_err || dat_err) {
- dev_err(host->dev, "cmd_err = %d, dat_err =%d, intsts = 0x%x",
+ dev_err(host->dev, "cmd_err = %d, dat_err = %d, intsts = 0x%x",
cmd_err, dat_err, intsts);
}
@@ -1619,8 +1851,9 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
static void msdc_init_hw(struct msdc_host *host)
{
- u32 val;
+ u32 val, pb1_val, pb2_val;
u32 tune_reg = host->dev_comp->pad_tune_reg;
+ struct mmc_host *mmc = mmc_from_priv(host);
if (host->reset) {
reset_control_assert(host->reset);
@@ -1628,6 +1861,17 @@ static void msdc_init_hw(struct msdc_host *host)
reset_control_deassert(host->reset);
}
+ /* The new tx/rx enable bits need a 0->1 transition for the hardware check */
+ if (host->dev_comp->support_new_tx) {
+ sdr_clr_bits(host->base + SDC_ADV_CFG0, SDC_NEW_TX_EN);
+ sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_NEW_TX_EN);
+ msdc_new_tx_setting(host);
+ }
+ if (host->dev_comp->support_new_rx) {
+ sdr_clr_bits(host->base + MSDC_NEW_RX_CFG, MSDC_NEW_RX_PATH_SEL);
+ sdr_set_bits(host->base + MSDC_NEW_RX_CFG, MSDC_NEW_RX_PATH_SEL);
+ }
+
/* Configure to MMC/SD mode, clock free running */
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);
@@ -1660,61 +1904,123 @@ static void msdc_init_hw(struct msdc_host *host)
}
writel(0, host->base + MSDC_IOCON);
sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
- writel(0x403c0046, host->base + MSDC_PATCH_BIT);
- sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1);
- writel(0xffff4089, host->base + MSDC_PATCH_BIT1);
- sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
+
+ /*
+ * Patch bits 0 and 1 are completely rewritten, but for patch bit 2
+ * defaults are retained and, if necessary, only some bits are fixed
+ * up: read the PB2 register here for later usage in this function.
+ */
+ pb2_val = readl(host->base + MSDC_PATCH_BIT2);
+
+ /* Enable odd number support for 8-bit data bus */
+ val = MSDC_PATCH_BIT_ODDSUPP;
+
+ /* Disable SD command register write monitor */
+ val |= MSDC_PATCH_BIT_DIS_WRMON;
+
+ /* Issue transfer done interrupt after GPD update */
+ val |= MSDC_PATCH_BIT_DESCUP_SEL;
+
+ /* Extend R1B busy detection delay (in clock cycles) */
+ val |= FIELD_PREP(MSDC_PATCH_BIT_BUSYDLY, 15);
+
+ /* Enable CRC phase timeout during data write operation */
+ val |= MSDC_PATCH_BIT_DECRCTMO;
+
+ /* Set CKGEN delay to one stage */
+ val |= FIELD_PREP(MSDC_CKGEN_MSDC_DLY_SEL, 1);
+
+ /* First MSDC_PATCH_BIT setup is done: pull the trigger! */
+ writel(val, host->base + MSDC_PATCH_BIT);
+
+ /* Set wr data, crc status, cmd response turnaround period for UHS104 */
+ pb1_val = FIELD_PREP(MSDC_PB1_WRDAT_CRC_TACNTR, 1);
+ pb1_val |= FIELD_PREP(MSDC_PATCH_BIT1_CMDTA, 1);
+ pb1_val |= MSDC_PB1_DDR_CMD_FIX_SEL;
+
+ /* Support 'single' burst type only when AXI_LEN is 0 */
+ sdr_get_field(host->base + EMMC50_CFG2, EMMC50_CFG2_AXI_SET_LEN, &val);
+ if (!val)
+ pb1_val |= MSDC_PB1_SINGLE_BURST;
+
+ /* Set auto sync state clear, block gap stop clk */
+ pb1_val |= MSDC_PB1_RSVD20 | MSDC_PB1_AUTO_SYNCST_CLR | MSDC_PB1_MARK_POP_WATER;
+
+ /* Set low power DCM, use HCLK for GDMA, use MSDC CLK for everything else */
+ pb1_val |= MSDC_PB1_LP_DCM_EN | MSDC_PB1_RSVD3 |
+ MSDC_PB1_AHB_GDMA_HCLK | MSDC_PB1_MSDC_CLK_ENFEAT;
+
+ /* If needed, enable R1b command busy check at controller init time */
+ if (!host->dev_comp->busy_check)
+ pb1_val |= MSDC_PB1_BUSY_CHECK_SEL;
if (host->dev_comp->stop_clk_fix) {
- sdr_set_field(host->base + MSDC_PATCH_BIT1,
- MSDC_PATCH_BIT1_STOP_DLY, 3);
- sdr_clr_bits(host->base + SDC_FIFO_CFG,
- SDC_FIFO_CFG_WRVALIDSEL);
- sdr_clr_bits(host->base + SDC_FIFO_CFG,
- SDC_FIFO_CFG_RDVALIDSEL);
- }
+ if (host->dev_comp->stop_dly_sel)
+ pb1_val |= FIELD_PREP(MSDC_PATCH_BIT1_STOP_DLY,
+ host->dev_comp->stop_dly_sel);
+
+ if (host->dev_comp->pop_en_cnt) {
+ pb2_val &= ~MSDC_PB2_POP_EN_CNT;
+ pb2_val |= FIELD_PREP(MSDC_PB2_POP_EN_CNT,
+ host->dev_comp->pop_en_cnt);
+ }
- if (host->dev_comp->busy_check)
- sdr_clr_bits(host->base + MSDC_PATCH_BIT1, (1 << 7));
+ sdr_clr_bits(host->base + SDC_FIFO_CFG, SDC_FIFO_CFG_WRVALIDSEL);
+ sdr_clr_bits(host->base + SDC_FIFO_CFG, SDC_FIFO_CFG_RDVALIDSEL);
+ }
if (host->dev_comp->async_fifo) {
- sdr_set_field(host->base + MSDC_PATCH_BIT2,
- MSDC_PB2_RESPWAIT, 3);
- if (host->dev_comp->enhance_rx) {
- if (host->top_base)
- sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
- SDC_RX_ENH_EN);
- else
- sdr_set_bits(host->base + SDC_ADV_CFG0,
- SDC_RX_ENHANCE_EN);
+ /* Set CMD response timeout multiplier to 65 + (16 * 3) cycles */
+ pb2_val &= ~MSDC_PB2_RESPWAIT;
+ pb2_val |= FIELD_PREP(MSDC_PB2_RESPWAIT, 3);
+
+ /* eMMC4.5: Select async FIFO path for CMD resp and CRC status */
+ pb2_val &= ~MSDC_PATCH_BIT2_CFGRESP;
+ pb2_val |= MSDC_PATCH_BIT2_CFGCRCSTS;
+
+ if (!host->dev_comp->enhance_rx) {
+ /* eMMC4.5: Delay 2T for CMD resp and CRC status EN signals */
+ pb2_val &= ~(MSDC_PB2_RESPSTSENSEL | MSDC_PB2_CRCSTSENSEL);
+ pb2_val |= FIELD_PREP(MSDC_PB2_RESPSTSENSEL, 2);
+ pb2_val |= FIELD_PREP(MSDC_PB2_CRCSTSENSEL, 2);
+ } else if (host->top_base) {
+ sdr_set_bits(host->top_base + EMMC_TOP_CONTROL, SDC_RX_ENH_EN);
} else {
- sdr_set_field(host->base + MSDC_PATCH_BIT2,
- MSDC_PB2_RESPSTSENSEL, 2);
- sdr_set_field(host->base + MSDC_PATCH_BIT2,
- MSDC_PB2_CRCSTSENSEL, 2);
+ sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_RX_ENHANCE_EN);
}
- /* use async fifo, then no need tune internal delay */
- sdr_clr_bits(host->base + MSDC_PATCH_BIT2,
- MSDC_PATCH_BIT2_CFGRESP);
- sdr_set_bits(host->base + MSDC_PATCH_BIT2,
- MSDC_PATCH_BIT2_CFGCRCSTS);
}
if (host->dev_comp->support_64g)
- sdr_set_bits(host->base + MSDC_PATCH_BIT2,
- MSDC_PB2_SUPPORT_64G);
+ pb2_val |= MSDC_PB2_SUPPORT_64G;
+
+ /* Patch Bit 1/2 setup is done: pull the trigger! */
+ writel(pb1_val, host->base + MSDC_PATCH_BIT1);
+ writel(pb2_val, host->base + MSDC_PATCH_BIT2);
+ sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
+
if (host->dev_comp->data_tune) {
if (host->top_base) {
- sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
- PAD_DAT_RD_RXDLY_SEL);
- sdr_clr_bits(host->top_base + EMMC_TOP_CONTROL,
- DATA_K_VALUE_SEL);
- sdr_set_bits(host->top_base + EMMC_TOP_CMD,
- PAD_CMD_RD_RXDLY_SEL);
+ u32 top_ctl_val = readl(host->top_base + EMMC_TOP_CONTROL);
+ u32 top_cmd_val = readl(host->top_base + EMMC_TOP_CMD);
+
+ top_cmd_val |= PAD_CMD_RD_RXDLY_SEL;
+ top_ctl_val |= PAD_DAT_RD_RXDLY_SEL;
+ top_ctl_val &= ~DATA_K_VALUE_SEL;
+ if (host->tuning_step > PAD_DELAY_HALF) {
+ top_cmd_val |= PAD_CMD_RD_RXDLY2_SEL;
+ top_ctl_val |= PAD_DAT_RD_RXDLY2_SEL;
+ }
+
+ writel(top_ctl_val, host->top_base + EMMC_TOP_CONTROL);
+ writel(top_cmd_val, host->top_base + EMMC_TOP_CMD);
} else {
sdr_set_bits(host->base + tune_reg,
MSDC_PAD_TUNE_RD_SEL |
MSDC_PAD_TUNE_CMD_SEL);
+ if (host->tuning_step > PAD_DELAY_HALF)
+ sdr_set_bits(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
+ MSDC_PAD_TUNE_RD2_SEL |
+ MSDC_PAD_TUNE_CMD2_SEL);
}
} else {
/* choose clock tune */
@@ -1726,14 +2032,18 @@ static void msdc_init_hw(struct msdc_host *host)
MSDC_PAD_TUNE_RXDLYSEL);
}
- /* Configure to enable SDIO mode.
- * it's must otherwise sdio cmd5 failed
- */
- sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
+ if (mmc->caps2 & MMC_CAP2_NO_SDIO) {
+ sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
+ sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
+ sdr_clr_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER);
+ } else {
+ /* Configure to enable SDIO mode, otherwise SDIO CMD5 fails */
+ sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
- /* Config SDIO device detect interrupt function */
- sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
- sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER);
+ /* Config SDIO device detect interrupt function */
+ sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
+ sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER);
+ }
/* Configure to default data timeout */
sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
@@ -1852,24 +2162,24 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
msdc_set_mclk(host, ios->timing, ios->clock);
}
-static u32 test_delay_bit(u32 delay, u32 bit)
+static u64 test_delay_bit(u64 delay, u32 bit)
{
- bit %= PAD_DELAY_MAX;
- return delay & (1 << bit);
+ bit %= PAD_DELAY_FULL;
+ return delay & BIT_ULL(bit);
}
-static int get_delay_len(u32 delay, u32 start_bit)
+static int get_delay_len(u64 delay, u32 start_bit)
{
int i;
- for (i = 0; i < (PAD_DELAY_MAX - start_bit); i++) {
+ for (i = 0; i < (PAD_DELAY_FULL - start_bit); i++) {
if (test_delay_bit(delay, start_bit + i) == 0)
return i;
}
- return PAD_DELAY_MAX - start_bit;
+ return PAD_DELAY_FULL - start_bit;
}
-static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
+static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u64 delay)
{
int start = 0, len = 0;
int start_final = 0, len_final = 0;
@@ -1877,29 +2187,29 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
struct msdc_delay_phase delay_phase = { 0, };
if (delay == 0) {
- dev_err(host->dev, "phase error: [map:%x]\n", delay);
+ dev_err(host->dev, "phase error: [map:%016llx]\n", delay);
delay_phase.final_phase = final_phase;
return delay_phase;
}
- while (start < PAD_DELAY_MAX) {
+ while (start < PAD_DELAY_FULL) {
len = get_delay_len(delay, start);
if (len_final < len) {
start_final = start;
len_final = len;
}
start += len ? len : 1;
- if (len >= 12 && start_final < 4)
+ if (!upper_32_bits(delay) && len >= 12 && start_final < 4)
break;
}
/* The rule is that to find the smallest delay cell */
if (start_final == 0)
- final_phase = (start_final + len_final / 3) % PAD_DELAY_MAX;
+ final_phase = (start_final + len_final / 3) % PAD_DELAY_FULL;
else
- final_phase = (start_final + len_final / 2) % PAD_DELAY_MAX;
- dev_info(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n",
- delay, len_final, final_phase);
+ final_phase = (start_final + len_final / 2) % PAD_DELAY_FULL;
+ dev_dbg(host->dev, "phase: [map:%016llx] [maxlen:%d] [final:%d]\n",
+ delay, len_final, final_phase);
delay_phase.maxlen = len_final;
delay_phase.start = start_final;
@@ -1911,30 +2221,80 @@ static inline void msdc_set_cmd_delay(struct msdc_host *host, u32 value)
{
u32 tune_reg = host->dev_comp->pad_tune_reg;
- if (host->top_base)
- sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY,
- value);
- else
- sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
- value);
+ if (host->top_base) {
+ u32 regval = readl(host->top_base + EMMC_TOP_CMD);
+
+ regval &= ~(PAD_CMD_RXDLY | PAD_CMD_RXDLY2);
+
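+ /*
+ * The receive delay is built from two chained half-length delay lines:
+ * values below PAD_DELAY_HALF use only RXDLY, larger values saturate
+ * RXDLY and put the remainder into RXDLY2.
+ */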
+ if (value < PAD_DELAY_HALF) {
+ regval |= FIELD_PREP(PAD_CMD_RXDLY, value);
+ } else {
+ regval |= FIELD_PREP(PAD_CMD_RXDLY, PAD_DELAY_HALF - 1);
+ regval |= FIELD_PREP(PAD_CMD_RXDLY2, value - PAD_DELAY_HALF);
+ }
+ writel(regval, host->top_base + EMMC_TOP_CMD);
+ } else {
+ if (value < PAD_DELAY_HALF) {
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, value);
+ sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
+ MSDC_PAD_TUNE_CMDRDLY2, 0);
+ } else {
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
+ PAD_DELAY_HALF - 1);
+ sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
+ MSDC_PAD_TUNE_CMDRDLY2, value - PAD_DELAY_HALF);
+ }
+ }
}
static inline void msdc_set_data_delay(struct msdc_host *host, u32 value)
{
u32 tune_reg = host->dev_comp->pad_tune_reg;
- if (host->top_base)
- sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
- PAD_DAT_RD_RXDLY, value);
- else
- sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY,
- value);
+ if (host->top_base) {
+ u32 regval = readl(host->top_base + EMMC_TOP_CONTROL);
+
+ regval &= ~(PAD_DAT_RD_RXDLY | PAD_DAT_RD_RXDLY2);
+
+ if (value < PAD_DELAY_HALF) {
+ regval |= FIELD_PREP(PAD_DAT_RD_RXDLY, value);
+ regval |= FIELD_PREP(PAD_DAT_RD_RXDLY2, value);
+ } else {
+ regval |= FIELD_PREP(PAD_DAT_RD_RXDLY, PAD_DELAY_HALF - 1);
+ regval |= FIELD_PREP(PAD_DAT_RD_RXDLY2, value - PAD_DELAY_HALF);
+ }
+ writel(regval, host->top_base + EMMC_TOP_CONTROL);
+ } else {
+ if (value < PAD_DELAY_HALF) {
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, value);
+ sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
+ MSDC_PAD_TUNE_DATRRDLY2, 0);
+ } else {
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY,
+ PAD_DELAY_HALF - 1);
+ sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
+ MSDC_PAD_TUNE_DATRRDLY2, value - PAD_DELAY_HALF);
+ }
+ }
+}
+
+static inline void msdc_set_data_sample_edge(struct msdc_host *host, bool rising)
+{
+ u32 value = rising ? 0 : 1;
+
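+ /* 0 selects the rising clock edge, 1 the falling edge, for all fields below */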
+ if (host->dev_comp->support_new_rx) {
+ sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_PATCH_BIT_RD_DAT_SEL, value);
+ sdr_set_field(host->base + MSDC_PATCH_BIT2, MSDC_PB2_CFGCRCSTSEDGE, value);
+ } else {
+ sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DSPL, value);
+ sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL, value);
+ }
}
static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
- u32 rise_delay = 0, fall_delay = 0;
+ u64 rise_delay = 0, fall_delay = 0;
struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
struct msdc_delay_phase internal_delay_phase;
u8 final_delay, final_maxlen;
@@ -1950,7 +2310,7 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
host->hs200_cmd_int_delay);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- for (i = 0 ; i < PAD_DELAY_MAX; i++) {
+ for (i = 0; i < host->tuning_step; i++) {
msdc_set_cmd_delay(host, i);
/*
* Using the same parameters, it may sometimes pass the test,
@@ -1960,9 +2320,9 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
for (j = 0; j < 3; j++) {
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err) {
- rise_delay |= (1 << i);
+ rise_delay |= BIT_ULL(i);
} else {
- rise_delay &= ~(1 << i);
+ rise_delay &= ~BIT_ULL(i);
break;
}
}
@@ -1974,7 +2334,7 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
goto skip_fall;
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- for (i = 0; i < PAD_DELAY_MAX; i++) {
+ for (i = 0; i < host->tuning_step; i++) {
msdc_set_cmd_delay(host, i);
/*
* Using the same parameters, it may sometimes pass the test,
@@ -1984,9 +2344,9 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
for (j = 0; j < 3; j++) {
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err) {
- fall_delay |= (1 << i);
+ fall_delay |= BIT_ULL(i);
} else {
- fall_delay &= ~(1 << i);
+ fall_delay &= ~BIT_ULL(i);
break;
}
}
@@ -2009,12 +2369,12 @@ skip_fall:
if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay)
goto skip_internal;
- for (i = 0; i < PAD_DELAY_MAX; i++) {
+ for (i = 0; i < host->tuning_step; i++) {
sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY, i);
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err)
- internal_delay |= (1 << i);
+ internal_delay |= BIT_ULL(i);
}
dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay);
internal_delay_phase = get_best_delay(host, internal_delay);
@@ -2048,7 +2408,8 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
else
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- for (i = 0 ; i < PAD_DELAY_MAX; i++) {
+
+ for (i = 0; i < PAD_DELAY_HALF; i++) {
sdr_set_field(host->base + PAD_CMD_TUNE,
PAD_CMD_TUNE_RX_DLY3, i);
/*
@@ -2059,9 +2420,9 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
for (j = 0; j < 3; j++) {
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err) {
- cmd_delay |= (1 << i);
+ cmd_delay |= BIT(i);
} else {
- cmd_delay &= ~(1 << i);
+ cmd_delay &= ~BIT(i);
break;
}
}
@@ -2078,20 +2439,19 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
- u32 rise_delay = 0, fall_delay = 0;
+ u64 rise_delay = 0, fall_delay = 0;
struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
u8 final_delay, final_maxlen;
int i, ret;
sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
host->latch_ck);
- sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
- sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
- for (i = 0 ; i < PAD_DELAY_MAX; i++) {
+ msdc_set_data_sample_edge(host, true);
+ for (i = 0; i < host->tuning_step; i++) {
msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
- rise_delay |= (1 << i);
+ rise_delay |= BIT_ULL(i);
}
final_rise_delay = get_best_delay(host, rise_delay);
/* if rising edge has enough margin, then do not scan falling edge */
@@ -2099,25 +2459,22 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
(final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
goto skip_fall;
- sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
- sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
- for (i = 0; i < PAD_DELAY_MAX; i++) {
+ msdc_set_data_sample_edge(host, false);
+ for (i = 0; i < host->tuning_step; i++) {
msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
- fall_delay |= (1 << i);
+ fall_delay |= BIT_ULL(i);
}
final_fall_delay = get_best_delay(host, fall_delay);
skip_fall:
final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
if (final_maxlen == final_rise_delay.maxlen) {
- sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
- sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
+ msdc_set_data_sample_edge(host, true);
final_delay = final_rise_delay.final_phase;
} else {
- sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
- sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
+ msdc_set_data_sample_edge(host, false);
final_delay = final_fall_delay.final_phase;
}
msdc_set_data_delay(host, final_delay);
@@ -2133,7 +2490,7 @@ skip_fall:
static int msdc_tune_together(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
- u32 rise_delay = 0, fall_delay = 0;
+ u64 rise_delay = 0, fall_delay = 0;
struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
u8 final_delay, final_maxlen;
int i, ret;
@@ -2142,14 +2499,13 @@ static int msdc_tune_together(struct mmc_host *mmc, u32 opcode)
host->latch_ck);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- sdr_clr_bits(host->base + MSDC_IOCON,
- MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
- for (i = 0 ; i < PAD_DELAY_MAX; i++) {
+ msdc_set_data_sample_edge(host, true);
+ for (i = 0; i < host->tuning_step; i++) {
msdc_set_cmd_delay(host, i);
msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
- rise_delay |= (1 << i);
+ rise_delay |= BIT_ULL(i);
}
final_rise_delay = get_best_delay(host, rise_delay);
/* if rising edge has enough margin, then do not scan falling edge */
@@ -2158,14 +2514,13 @@ static int msdc_tune_together(struct mmc_host *mmc, u32 opcode)
goto skip_fall;
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- sdr_set_bits(host->base + MSDC_IOCON,
- MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
- for (i = 0; i < PAD_DELAY_MAX; i++) {
+ msdc_set_data_sample_edge(host, false);
+ for (i = 0; i < host->tuning_step; i++) {
msdc_set_cmd_delay(host, i);
msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
- fall_delay |= (1 << i);
+ fall_delay |= BIT_ULL(i);
}
final_fall_delay = get_best_delay(host, fall_delay);
@@ -2173,13 +2528,11 @@ skip_fall:
final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- sdr_clr_bits(host->base + MSDC_IOCON,
- MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
+ msdc_set_data_sample_edge(host, true);
final_delay = final_rise_delay.final_phase;
} else {
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- sdr_set_bits(host->base + MSDC_IOCON,
- MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
+ msdc_set_data_sample_edge(host, false);
final_delay = final_fall_delay.final_phase;
}
@@ -2199,8 +2552,7 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (host->dev_comp->data_tune && host->dev_comp->async_fifo) {
ret = msdc_tune_together(mmc, opcode);
if (host->hs400_mode) {
- sdr_clr_bits(host->base + MSDC_IOCON,
- MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
+ msdc_set_data_sample_edge(host, true);
msdc_set_data_delay(host, 0);
}
goto tune_done;
@@ -2236,13 +2588,23 @@ tune_done:
static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct msdc_host *host = mmc_priv(mmc);
+
host->hs400_mode = true;
- if (host->top_base)
- writel(host->hs400_ds_delay,
- host->top_base + EMMC50_PAD_DS_TUNE);
- else
- writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+ if (host->top_base) {
+ if (host->hs400_ds_dly3)
+ sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY3, host->hs400_ds_dly3);
+ if (host->hs400_ds_delay)
+ writel(host->hs400_ds_delay,
+ host->top_base + EMMC50_PAD_DS_TUNE);
+ } else {
+ if (host->hs400_ds_dly3)
+ sdr_set_field(host->base + PAD_DS_TUNE,
+ PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
+ if (host->hs400_ds_delay)
+ writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+ }
/* hs400 mode must set it to 0 */
sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
/* to improve read performance, set outstanding to 2 */
@@ -2251,6 +2613,66 @@ static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
return 0;
}
+static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+ struct msdc_delay_phase dly1_delay;
+ u32 val, result_dly1 = 0;
+ u8 *ext_csd;
+ int i, ret;
+
+ if (host->top_base) {
+ sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY_SEL);
+ sdr_clr_bits(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY2_SEL);
+ } else {
+ sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL);
+ sdr_clr_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY2_SEL);
+ }
+
+ host->hs400_tuning = true;
+ for (i = 0; i < PAD_DELAY_HALF; i++) {
+ if (host->top_base)
+ sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY1, i);
+ else
+ sdr_set_field(host->base + PAD_DS_TUNE,
+ PAD_DS_TUNE_DLY1, i);
+ ret = mmc_get_ext_csd(card, &ext_csd);
+ if (!ret) {
+ result_dly1 |= BIT(i);
+ kfree(ext_csd);
+ }
+ }
+ host->hs400_tuning = false;
+
+ dly1_delay = get_best_delay(host, result_dly1);
+ if (dly1_delay.maxlen == 0) {
+ dev_err(host->dev, "Failed to get DLY1 delay!\n");
+ goto fail;
+ }
+ if (host->top_base)
+ sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY1, dly1_delay.final_phase);
+ else
+ sdr_set_field(host->base + PAD_DS_TUNE,
+ PAD_DS_TUNE_DLY1, dly1_delay.final_phase);
+
+ if (host->top_base)
+ val = readl(host->top_base + EMMC50_PAD_DS_TUNE);
+ else
+ val = readl(host->base + PAD_DS_TUNE);
+
+ dev_info(host->dev, "Final PAD_DS_TUNE: 0x%x\n", val);
+
+ return 0;
+
+fail:
+ dev_err(host->dev, "Failed to tuning DS pin delay!\n");
+ return -EIO;
+}
+
static void msdc_hw_reset(struct mmc_host *mmc)
{
struct msdc_host *host = mmc_priv(mmc);
@@ -2313,9 +2735,49 @@ static void msdc_hs400_enhanced_strobe(struct mmc_host *mmc,
}
}
+static void msdc_cqe_cit_cal(struct msdc_host *host, u64 timer_ns)
+{
+ struct mmc_host *mmc = mmc_from_priv(host);
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ u8 itcfmul;
+ u64 hclk_freq, value;
+
+ /*
+ * On MediaTek SoCs the MSDC controller's CQE uses msdc_hclk as ITCFVAL
+ * so we multiply/divide the HCLK frequency by ITCFMUL to calculate the
+ * Send Status Command Idle Timer (CIT) value.
+ */
+ hclk_freq = (u64)clk_get_rate(host->h_clk);
+ itcfmul = CQHCI_ITCFMUL(cqhci_readl(cq_host, CQHCI_CAP));
+ switch (itcfmul) {
+ case 0x0:
+ do_div(hclk_freq, 1000);
+ break;
+ case 0x1:
+ do_div(hclk_freq, 100);
+ break;
+ case 0x2:
+ do_div(hclk_freq, 10);
+ break;
+ case 0x3:
+ break;
+ case 0x4:
+ hclk_freq = hclk_freq * 10;
+ break;
+ default:
+ host->cq_ssc1_time = 0x40;
+ return;
+ }
+
+ value = hclk_freq * timer_ns;
+ do_div(value, 1000000000);
+ host->cq_ssc1_time = value;
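+ /*
+ * Illustrative example: with a ~273 MHz hclk and ITCFMUL = 0x2 (hclk/10),
+ * a 2350 ns timer gives roughly 64 (0x40) SSC1 ticks.
+ */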
+}
+
static void msdc_cqe_enable(struct mmc_host *mmc)
{
struct msdc_host *host = mmc_priv(mmc);
+ struct cqhci_host *cq_host = mmc->cqe_private;
/* enable cmdq irq */
writel(MSDC_INT_CMDQ, host->base + MSDC_INTEN);
@@ -2325,20 +2787,33 @@ static void msdc_cqe_enable(struct mmc_host *mmc)
msdc_set_busy_timeout(host, 20 * 1000000000ULL, 0);
/* default read data timeout 1s */
msdc_set_timeout(host, 1000000000ULL, 0);
+
+ /* Set the send status command idle timer */
+ cqhci_writel(cq_host, host->cq_ssc1_time, CQHCI_SSC1);
}
static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
{
struct msdc_host *host = mmc_priv(mmc);
+ unsigned int val = 0;
/* disable cmdq irq */
sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INT_CMDQ);
/* disable busy check */
sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
+
if (recovery) {
sdr_set_field(host->base + MSDC_DMA_CTRL,
MSDC_DMA_CTRL_STOP, 1);
+ if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CTRL, val,
+ !(val & MSDC_DMA_CTRL_STOP), 1, 3000)))
+ return;
+ if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val,
+ !(val & MSDC_DMA_CFG_STS), 1, 3000)))
+ return;
msdc_reset_hw(host);
}
}
@@ -2377,7 +2852,8 @@ static const struct mmc_host_ops mt_msdc_ops = {
.card_busy = msdc_card_busy,
.execute_tuning = msdc_execute_tuning,
.prepare_hs400_tuning = msdc_prepare_hs400_tuning,
- .hw_reset = msdc_hw_reset,
+ .execute_hs400_tuning = msdc_execute_hs400_tuning,
+ .card_hw_reset = msdc_hw_reset,
};
static const struct cqhci_host_ops msdc_cmdq_ops = {
@@ -2390,12 +2866,17 @@ static const struct cqhci_host_ops msdc_cmdq_ops = {
static void msdc_of_property_parse(struct platform_device *pdev,
struct msdc_host *host)
{
+ struct mmc_host *mmc = mmc_from_priv(host);
+
of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck",
&host->latch_ck);
of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
&host->hs400_ds_delay);
+ of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-ds-dly3",
+ &host->hs400_ds_dly3);
+
of_property_read_u32(pdev->dev.of_node, "mediatek,hs200-cmd-int-delay",
&host->hs200_cmd_int_delay);
@@ -2408,6 +2889,14 @@ static void msdc_of_property_parse(struct platform_device *pdev,
else
host->hs400_cmd_resp_sel_rising = false;
+ if (of_property_read_u32(pdev->dev.of_node, "mediatek,tuning-step",
+ &host->tuning_step)) {
+ if (mmc->caps2 & MMC_CAP2_NO_MMC)
+ host->tuning_step = PAD_DELAY_FULL;
+ else
+ host->tuning_step = PAD_DELAY_HALF;
+ }
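+ /*
+ * PAD_DELAY_FULL and PAD_DELAY_HALF are 64 and 32 tuning steps
+ * respectively in the driver's current register definitions.
+ */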
+
if (of_property_read_bool(pdev->dev.of_node,
"supports-cqe"))
host->cqhci = true;
@@ -2435,14 +2924,25 @@ static int msdc_of_clock_parse(struct platform_device *pdev,
/* source clock control gate is an optional clock */
host->src_clk_cg = devm_clk_get_optional(&pdev->dev, "source_cg");
if (IS_ERR(host->src_clk_cg))
- host->src_clk_cg = NULL;
+ return PTR_ERR(host->src_clk_cg);
- host->sys_clk_cg = devm_clk_get_optional(&pdev->dev, "sys_cg");
- if (IS_ERR(host->sys_clk_cg))
- host->sys_clk_cg = NULL;
+ /*
+ * Fallback for legacy device-trees: src_clk and HCLK use the same
+ * bit to control gating but they are parented to a different mux,
+ * hence, when we want to gate only the source clock (required during
+ * a clk mode switch to avoid hw hangs), we need to gate its parent
+ * (specified as a different clock only on new DTs).
+ */
+ if (!host->src_clk_cg) {
+ host->src_clk_cg = clk_get_parent(host->src_clk);
+ if (IS_ERR(host->src_clk_cg))
+ return PTR_ERR(host->src_clk_cg);
+ }
/* If present, always enable for this clock gate */
- clk_prepare_enable(host->sys_clk_cg);
+ host->sys_clk_cg = devm_clk_get_optional_enabled(&pdev->dev, "sys_cg");
+ if (IS_ERR(host->sys_clk_cg))
+ host->sys_clk_cg = NULL;
host->bulk_clks[0].id = "pclk_cg";
host->bulk_clks[1].id = "axi_cg";
@@ -2461,7 +2961,6 @@ static int msdc_drv_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct msdc_host *host;
- struct resource *res;
int ret;
if (!pdev->dev.of_node) {
@@ -2470,74 +2969,87 @@ static int msdc_drv_probe(struct platform_device *pdev)
}
/* Allocate MMC host for this device */
- mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(struct msdc_host));
if (!mmc)
return -ENOMEM;
host = mmc_priv(mmc);
ret = mmc_of_parse(mmc);
if (ret)
- goto host_free;
+ return ret;
host->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(host->base)) {
- ret = PTR_ERR(host->base);
- goto host_free;
- }
+ if (IS_ERR(host->base))
+ return PTR_ERR(host->base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res) {
- host->top_base = devm_ioremap_resource(&pdev->dev, res);
+ host->dev_comp = of_device_get_match_data(&pdev->dev);
+
+ if (host->dev_comp->needs_top_base) {
+ host->top_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(host->top_base))
- host->top_base = NULL;
+ return PTR_ERR(host->top_base);
}
ret = mmc_regulator_get_supply(mmc);
if (ret)
- goto host_free;
+ return ret;
ret = msdc_of_clock_parse(pdev, host);
if (ret)
- goto host_free;
+ return ret;
host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev,
"hrst");
- if (IS_ERR(host->reset)) {
- ret = PTR_ERR(host->reset);
- goto host_free;
+ if (IS_ERR(host->reset))
+ return PTR_ERR(host->reset);
+
+ /* only eMMC has crypto property */
+ if (!(mmc->caps2 & MMC_CAP2_NO_MMC)) {
+ host->crypto_clk = devm_clk_get_optional(&pdev->dev, "crypto");
+ if (IS_ERR(host->crypto_clk))
+ return PTR_ERR(host->crypto_clk);
+ else if (host->crypto_clk)
+ mmc->caps2 |= MMC_CAP2_CRYPTO;
}
host->irq = platform_get_irq(pdev, 0);
- if (host->irq < 0) {
- ret = -EINVAL;
- goto host_free;
- }
+ if (host->irq < 0)
+ return host->irq;
host->pinctrl = devm_pinctrl_get(&pdev->dev);
- if (IS_ERR(host->pinctrl)) {
- ret = PTR_ERR(host->pinctrl);
- dev_err(&pdev->dev, "Cannot find pinctrl!\n");
- goto host_free;
- }
+ if (IS_ERR(host->pinctrl))
+ return dev_err_probe(&pdev->dev, PTR_ERR(host->pinctrl),
+ "Cannot find pinctrl");
host->pins_default = pinctrl_lookup_state(host->pinctrl, "default");
if (IS_ERR(host->pins_default)) {
- ret = PTR_ERR(host->pins_default);
dev_err(&pdev->dev, "Cannot find pinctrl default!\n");
- goto host_free;
+ return PTR_ERR(host->pins_default);
}
host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
if (IS_ERR(host->pins_uhs)) {
- ret = PTR_ERR(host->pins_uhs);
dev_err(&pdev->dev, "Cannot find pinctrl uhs!\n");
- goto host_free;
+ return PTR_ERR(host->pins_uhs);
+ }
+
+ /* Optional support for an SDIO eint wakeup irq */
+ if ((mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ) && (mmc->pm_caps & MMC_PM_KEEP_POWER)) {
+ host->eint_irq = platform_get_irq_byname_optional(pdev, "sdio_wakeup");
+ if (host->eint_irq > 0) {
+ host->pins_eint = pinctrl_lookup_state(host->pinctrl, "state_eint");
+ if (IS_ERR(host->pins_eint)) {
+ dev_err(&pdev->dev, "Cannot find pinctrl eint!\n");
+ host->pins_eint = NULL;
+ } else {
+ device_init_wakeup(&pdev->dev, true);
+ }
+ }
}
msdc_of_property_parse(pdev, host);
host->dev = &pdev->dev;
- host->dev_comp = of_device_get_match_data(&pdev->dev);
host->src_clk_freq = clk_get_rate(host->src_clk);
/* Set host parameters to mmc */
mmc->ops = &mt_msdc_ops;
@@ -2547,7 +3059,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095);
if (!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
- !mmc_can_gpio_cd(mmc) &&
+ !mmc_host_can_gpio_cd(mmc) &&
host->dev_comp->use_internal_cd) {
/*
* Is removable but no GPIO declared, so
@@ -2577,44 +3089,63 @@ static int msdc_drv_probe(struct platform_device *pdev)
host->dma_mask = DMA_BIT_MASK(32);
mmc_dev(mmc)->dma_mask = &host->dma_mask;
+ host->timeout_clks = 3 * 1048576;
+ host->dma.gpd = dma_alloc_coherent(&pdev->dev,
+ 2 * sizeof(struct mt_gpdma_desc),
+ &host->dma.gpd_addr, GFP_KERNEL);
+ host->dma.bd = dma_alloc_coherent(&pdev->dev,
+ MAX_BD_NUM * sizeof(struct mt_bdma_desc),
+ &host->dma.bd_addr, GFP_KERNEL);
+ if (!host->dma.gpd || !host->dma.bd) {
+ ret = -ENOMEM;
+ goto release_mem;
+ }
+ msdc_init_gpd_bd(host, &host->dma);
+ INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
+ spin_lock_init(&host->lock);
+
+ platform_set_drvdata(pdev, mmc);
+ ret = msdc_ungate_clock(host);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot ungate clocks!\n");
+ goto release_clk;
+ }
+ msdc_init_hw(host);
+
if (mmc->caps2 & MMC_CAP2_CQE) {
host->cq_host = devm_kzalloc(mmc->parent,
sizeof(*host->cq_host),
GFP_KERNEL);
if (!host->cq_host) {
ret = -ENOMEM;
- goto host_free;
+ goto release;
}
host->cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
host->cq_host->mmio = host->base + 0x800;
host->cq_host->ops = &msdc_cmdq_ops;
ret = cqhci_init(host->cq_host, mmc, true);
if (ret)
- goto host_free;
+ goto release;
mmc->max_segs = 128;
/* cqhci 16bit length */
/* 0 size, means 65536 so we don't have to -1 here */
mmc->max_seg_size = 64 * 1024;
- }
+ /* Reduce CIT to 0x40, which corresponds to 2.35us */
+ msdc_cqe_cit_cal(host, 2350);
+ } else if (mmc->caps2 & MMC_CAP2_NO_SDIO) {
+ /* Use HSQ on eMMC/SD (but not on SDIO) if HW CQE not supported */
+ struct mmc_hsq *hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
+ if (!hsq) {
+ ret = -ENOMEM;
+ goto release;
+ }
- host->timeout_clks = 3 * 1048576;
- host->dma.gpd = dma_alloc_coherent(&pdev->dev,
- 2 * sizeof(struct mt_gpdma_desc),
- &host->dma.gpd_addr, GFP_KERNEL);
- host->dma.bd = dma_alloc_coherent(&pdev->dev,
- MAX_BD_NUM * sizeof(struct mt_bdma_desc),
- &host->dma.bd_addr, GFP_KERNEL);
- if (!host->dma.gpd || !host->dma.bd) {
- ret = -ENOMEM;
- goto release_mem;
- }
- msdc_init_gpd_bd(host, &host->dma);
- INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
- spin_lock_init(&host->lock);
+ ret = mmc_hsq_init(hsq, mmc);
+ if (ret)
+ goto release;
- platform_set_drvdata(pdev, mmc);
- msdc_ungate_clock(host);
- msdc_init_hw(host);
+ host->hsq_en = true;
+ }
ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
IRQF_TRIGGER_NONE, pdev->name, host);
@@ -2634,25 +3165,24 @@ static int msdc_drv_probe(struct platform_device *pdev)
end:
pm_runtime_disable(host->dev);
release:
- platform_set_drvdata(pdev, NULL);
msdc_deinit_hw(host);
+release_clk:
msdc_gate_clock(host);
+ platform_set_drvdata(pdev, NULL);
release_mem:
+ device_init_wakeup(&pdev->dev, false);
if (host->dma.gpd)
dma_free_coherent(&pdev->dev,
2 * sizeof(struct mt_gpdma_desc),
host->dma.gpd, host->dma.gpd_addr);
if (host->dma.bd)
dma_free_coherent(&pdev->dev,
- MAX_BD_NUM * sizeof(struct mt_bdma_desc),
- host->dma.bd, host->dma.bd_addr);
-host_free:
- mmc_free_host(mmc);
-
+ MAX_BD_NUM * sizeof(struct mt_bdma_desc),
+ host->dma.bd, host->dma.bd_addr);
return ret;
}
-static int msdc_drv_remove(struct platform_device *pdev)
+static void msdc_drv_remove(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct msdc_host *host;
@@ -2673,11 +3203,8 @@ static int msdc_drv_remove(struct platform_device *pdev)
2 * sizeof(struct mt_gpdma_desc),
host->dma.gpd, host->dma.gpd_addr);
dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc),
- host->dma.bd, host->dma.bd_addr);
-
- mmc_free_host(mmc);
-
- return 0;
+ host->dma.bd, host->dma.bd_addr);
+ device_init_wakeup(&pdev->dev, false);
}
static void msdc_save_reg(struct msdc_host *host)
@@ -2702,6 +3229,8 @@ static void msdc_save_reg(struct msdc_host *host)
readl(host->top_base + EMMC_TOP_CMD);
host->save_para.emmc50_pad_ds_tune =
readl(host->top_base + EMMC50_PAD_DS_TUNE);
+ host->save_para.loop_test_control =
+ readl(host->top_base + LOOP_TEST_CONTROL);
} else {
host->save_para.pad_tune = readl(host->base + tune_reg);
}
@@ -2712,6 +3241,15 @@ static void msdc_restore_reg(struct msdc_host *host)
struct mmc_host *mmc = mmc_from_priv(host);
u32 tune_reg = host->dev_comp->pad_tune_reg;
+ if (host->dev_comp->support_new_tx) {
+ sdr_clr_bits(host->base + SDC_ADV_CFG0, SDC_NEW_TX_EN);
+ sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_NEW_TX_EN);
+ }
+ if (host->dev_comp->support_new_rx) {
+ sdr_clr_bits(host->base + MSDC_NEW_RX_CFG, MSDC_NEW_RX_PATH_SEL);
+ sdr_set_bits(host->base + MSDC_NEW_RX_CFG, MSDC_NEW_RX_PATH_SEL);
+ }
+
writel(host->save_para.msdc_cfg, host->base + MSDC_CFG);
writel(host->save_para.iocon, host->base + MSDC_IOCON);
writel(host->save_para.sdc_cfg, host->base + SDC_CFG);
@@ -2730,6 +3268,8 @@ static void msdc_restore_reg(struct msdc_host *host)
host->top_base + EMMC_TOP_CMD);
writel(host->save_para.emmc50_pad_ds_tune,
host->top_base + EMMC50_PAD_DS_TUNE);
+ writel(host->save_para.loop_test_control,
+ host->top_base + LOOP_TEST_CONTROL);
} else {
writel(host->save_para.pad_tune, host->base + tune_reg);
}
@@ -2738,48 +3278,90 @@ static void msdc_restore_reg(struct msdc_host *host)
__msdc_enable_sdio_irq(host, 1);
}
-static int __maybe_unused msdc_runtime_suspend(struct device *dev)
+static int msdc_runtime_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct msdc_host *host = mmc_priv(mmc);
+ if (host->hsq_en)
+ mmc_hsq_suspend(mmc);
+
msdc_save_reg(host);
+
+ if (sdio_irq_claimed(mmc)) {
+ if (host->pins_eint) {
+ disable_irq(host->irq);
+ pinctrl_select_state(host->pinctrl, host->pins_eint);
+ }
+
+ __msdc_enable_sdio_irq(host, 0);
+ }
msdc_gate_clock(host);
return 0;
}
-static int __maybe_unused msdc_runtime_resume(struct device *dev)
+static int msdc_runtime_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct msdc_host *host = mmc_priv(mmc);
+ int ret;
+
+ ret = msdc_ungate_clock(host);
+ if (ret)
+ return ret;
- msdc_ungate_clock(host);
msdc_restore_reg(host);
+
+ if (sdio_irq_claimed(mmc) && host->pins_eint) {
+ pinctrl_select_state(host->pinctrl, host->pins_uhs);
+ enable_irq(host->irq);
+ }
+
+ if (host->hsq_en)
+ mmc_hsq_resume(mmc);
+
return 0;
}
-static int __maybe_unused msdc_suspend(struct device *dev)
+static int msdc_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct msdc_host *host = mmc_priv(mmc);
int ret;
+ u32 val;
if (mmc->caps2 & MMC_CAP2_CQE) {
ret = cqhci_suspend(mmc);
if (ret)
return ret;
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
}
+ /*
+ * Bump up the runtime PM usage counter, otherwise dev->power.needs_force_resume
+ * will not be set to 1 and pm_runtime_force_resume() will return early.
+ */
+ if (sdio_irq_claimed(mmc) && host->pins_eint)
+ pm_runtime_get_noresume(dev);
+
return pm_runtime_force_suspend(dev);
}
-static int __maybe_unused msdc_resume(struct device *dev)
+static int msdc_resume(struct device *dev)
{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct msdc_host *host = mmc_priv(mmc);
+
+ if (sdio_irq_claimed(mmc) && host->pins_eint)
+ pm_runtime_put_noidle(dev);
+
return pm_runtime_force_resume(dev);
}
static const struct dev_pm_ops msdc_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume)
- SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume)
+ RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
};
static struct platform_driver mt_msdc_driver = {
@@ -2789,7 +3371,7 @@ static struct platform_driver mt_msdc_driver = {
.name = "mtk-msdc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = msdc_of_ids,
- .pm = &msdc_dev_pm_ops,
+ .pm = pm_ptr(&msdc_dev_pm_ops),
},
};
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 629efbe639c4..79df2fa89a3f 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -22,7 +22,7 @@
#include <linux/mmc/slot-gpio.h>
#include <linux/sizes.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "mvsdio.h"
@@ -292,7 +292,7 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
host->pio_ptr = NULL;
host->pio_size = 0;
} else {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
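+ /* The DMA API requires unmapping with the same nents passed to dma_map_sg() */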
}
@@ -464,7 +464,7 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
struct mmc_command *cmd = mrq->cmd;
u32 err_status = 0;
- del_timer(&host->timer);
+ timer_delete(&host->timer);
host->mrq = NULL;
host->intr_en &= MVSD_NOR_CARD_INT;
@@ -509,7 +509,7 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
static void mvsd_timeout_timer(struct timer_list *t)
{
- struct mvsd_host *host = from_timer(host, t, timer);
+ struct mvsd_host *host = timer_container_of(host, t, timer);
void __iomem *iobase = host->base;
struct mmc_request *mrq;
unsigned long flags;
@@ -704,13 +704,11 @@ static int mvsd_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return -ENXIO;
+ return irq;
- mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
- if (!mmc) {
- ret = -ENOMEM;
- goto out;
- }
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host));
+ if (!mmc)
+ return -ENOMEM;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -724,11 +722,9 @@ static int mvsd_probe(struct platform_device *pdev)
* fixed rate clock).
*/
host->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(host->clk)) {
- dev_err(&pdev->dev, "no clock associated\n");
- ret = -EINVAL;
- goto out;
- }
+ if (IS_ERR(host->clk))
+ return dev_err_probe(&pdev->dev, -EINVAL, "no clock associated\n");
+
clk_prepare_enable(host->clk);
mmc->ops = &mvsd_ops;
@@ -787,30 +783,22 @@ static int mvsd_probe(struct platform_device *pdev)
return 0;
out:
- if (mmc) {
- if (!IS_ERR(host->clk))
- clk_disable_unprepare(host->clk);
- mmc_free_host(mmc);
- }
-
+ clk_disable_unprepare(host->clk);
return ret;
}
-static int mvsd_remove(struct platform_device *pdev)
+static void mvsd_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct mvsd_host *host = mmc_priv(mmc);
mmc_remove_host(mmc);
- del_timer_sync(&host->timer);
+ timer_delete_sync(&host->timer);
mvsd_power_down(host);
if (!IS_ERR(host->clk))
clk_disable_unprepare(host->clk);
- mmc_free_host(mmc);
-
- return 0;
}
static const struct of_device_id mvsdio_dt_ids[] = {
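
This conversion, like several below, relies on devm_mmc_alloc_host(), which ties the mmc_host allocation to the device lifetime so the explicit mmc_free_host() calls in the error and remove paths can be dropped. A minimal probe/remove sketch under that assumption (the foo_* names are placeholders):

#include <linux/mmc/host.h>
#include <linux/platform_device.h>

struct foo_host {
	struct mmc_host *mmc;
};

static int foo_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct foo_host *host;

	/* Freed automatically on probe failure or driver unbind. */
	mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host));
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	platform_set_drvdata(pdev, mmc);

	return mmc_add_host(mmc);
}

static void foo_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	mmc_remove_host(mmc);	/* no mmc_free_host() needed */
}
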
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 2fe6fcdbb1b3..c405cfb8b269 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -31,7 +31,6 @@
#include <linux/dmaengine.h>
#include <linux/types.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/mmc/slot-gpio.h>
@@ -39,7 +38,7 @@
#include <asm/irq.h>
#include <linux/platform_data/mmc-mxcmmc.h>
-#include <linux/platform_data/dma-imx.h>
+#include <linux/dma/imx-dma.h>
#define DRIVER_NAME "mxc-mmc"
#define MXCMCI_TIMEOUT_MS 10000
@@ -267,11 +266,18 @@ static inline void buffer_swap32(u32 *buf, int len)
static void mxcmci_swap_buffers(struct mmc_data *data)
{
- struct scatterlist *sg;
- int i;
+ struct sg_mapping_iter sgm;
+ u32 *buf;
+
+ sg_miter_start(&sgm, data->sg, data->sg_len,
+ SG_MITER_TO_SG | SG_MITER_FROM_SG);
- for_each_sg(data->sg, sg, data->sg_len, i)
- buffer_swap32(sg_virt(sg), sg->length);
+ while (sg_miter_next(&sgm)) {
+ buf = sgm.addr;
+ buffer_swap32(buf, sgm.length);
+ }
+
+ sg_miter_stop(&sgm);
}
#else
static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
@@ -346,7 +352,7 @@ static void mxcmci_dma_callback(void *data)
struct mxcmci_host *host = data;
u32 stat;
- del_timer(&host->watchdog);
+ timer_delete(&host->watchdog);
stat = mxcmci_readl(host, MMC_REG_STATUS);
@@ -527,10 +533,9 @@ static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
} while (1);
}
-static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
+static int mxcmci_pull(struct mxcmci_host *host, u32 *buf, int bytes)
{
unsigned int stat;
- u32 *buf = _buf;
while (bytes > 3) {
stat = mxcmci_poll_status(host,
@@ -556,10 +561,9 @@ static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
return 0;
}
-static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
+static int mxcmci_push(struct mxcmci_host *host, u32 *buf, int bytes)
{
unsigned int stat;
- u32 *buf = _buf;
while (bytes > 3) {
stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
@@ -587,31 +591,39 @@ static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
static int mxcmci_transfer_data(struct mxcmci_host *host)
{
struct mmc_data *data = host->req->data;
- struct scatterlist *sg;
- int stat, i;
+ struct sg_mapping_iter sgm;
+ int stat;
+ u32 *buf;
host->data = data;
host->datasize = 0;
+ sg_miter_start(&sgm, data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
if (data->flags & MMC_DATA_READ) {
- for_each_sg(data->sg, sg, data->sg_len, i) {
- stat = mxcmci_pull(host, sg_virt(sg), sg->length);
+ while (sg_miter_next(&sgm)) {
+ buf = sgm.addr;
+ stat = mxcmci_pull(host, buf, sgm.length);
if (stat)
- return stat;
- host->datasize += sg->length;
+ goto transfer_error;
+ host->datasize += sgm.length;
}
} else {
- for_each_sg(data->sg, sg, data->sg_len, i) {
- stat = mxcmci_push(host, sg_virt(sg), sg->length);
+ while (sg_miter_next(&sgm)) {
+ buf = sgm.addr;
+ stat = mxcmci_push(host, buf, sgm.length);
if (stat)
- return stat;
- host->datasize += sg->length;
+ goto transfer_error;
+ host->datasize += sgm.length;
}
stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
if (stat)
- return stat;
+ goto transfer_error;
}
- return 0;
+
+transfer_error:
+ sg_miter_stop(&sgm);
+ return stat;
}
static void mxcmci_datawork(struct work_struct *work)
@@ -725,7 +737,7 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
mxcmci_cmd_done(host, stat);
if (mxcmci_use_dma(host) && (stat & STATUS_WRITE_OP_DONE)) {
- del_timer(&host->watchdog);
+ timer_delete(&host->watchdog);
mxcmci_data_done(host, stat);
}
@@ -923,7 +935,7 @@ static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
* One way to prevent this is to only allow 1-bit transfers.
*/
- if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO)
+ if (is_imx31_mmc(mxcmci) && mmc_card_sdio(card))
host->caps &= ~MMC_CAP_4_BIT_DATA;
else
host->caps |= MMC_CAP_4_BIT_DATA;
@@ -943,7 +955,7 @@ static bool filter(struct dma_chan *chan, void *param)
static void mxcmci_watchdog(struct timer_list *t)
{
- struct mxcmci_host *host = from_timer(host, t, watchdog);
+ struct mxcmci_host *host = timer_container_of(host, t, watchdog);
struct mmc_request *req = host->req;
unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);
@@ -983,34 +995,31 @@ static int mxcmci_probe(struct platform_device *pdev)
struct mxcmci_host *host;
struct resource *res;
int ret = 0, irq;
- bool dat3_card_detect = false;
+ bool dat3_card_detect;
dma_cap_mask_t mask;
struct imxmmc_platform_data *pdata = pdev->dev.platform_data;
pr_info("i.MX/MPC512x SDHC driver\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host));
if (!mmc)
return -ENOMEM;
host = mmc_priv(mmc);
- host->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(host->base)) {
- ret = PTR_ERR(host->base);
- goto out_free;
- }
+ host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(host->base))
+ return PTR_ERR(host->base);
host->phys_base = res->start;
ret = mmc_of_parse(mmc);
if (ret)
- goto out_free;
+ return ret;
mmc->ops = &mxcmci_ops;
/* For devicetree parsing, the bus width is read from devicetree */
@@ -1025,7 +1034,7 @@ static int mxcmci_probe(struct platform_device *pdev)
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
- host->devtype = (enum mxcmci_type)of_device_get_match_data(&pdev->dev);
+ host->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
/* adjust max_segs after devtype detection */
if (!is_mpc512x_mmc(host))
@@ -1037,13 +1046,13 @@ static int mxcmci_probe(struct platform_device *pdev)
if (pdata)
dat3_card_detect = pdata->dat3_card_detect;
- else if (mmc_card_is_removable(mmc)
- && !of_property_read_bool(pdev->dev.of_node, "cd-gpios"))
- dat3_card_detect = true;
+ else
+ dat3_card_detect = mmc_card_is_removable(mmc) &&
+ !of_property_present(pdev->dev.of_node, "cd-gpios");
ret = mmc_regulator_get_supply(mmc);
if (ret)
- goto out_free;
+ return ret;
if (!mmc->ocr_avail) {
if (pdata && pdata->ocr_avail)
@@ -1059,20 +1068,16 @@ static int mxcmci_probe(struct platform_device *pdev)
host->default_irq_mask = 0;
host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(host->clk_ipg)) {
- ret = PTR_ERR(host->clk_ipg);
- goto out_free;
- }
+ if (IS_ERR(host->clk_ipg))
+ return PTR_ERR(host->clk_ipg);
host->clk_per = devm_clk_get(&pdev->dev, "per");
- if (IS_ERR(host->clk_per)) {
- ret = PTR_ERR(host->clk_per);
- goto out_free;
- }
+ if (IS_ERR(host->clk_per))
+ return PTR_ERR(host->clk_per);
ret = clk_prepare_enable(host->clk_per);
if (ret)
- goto out_free;
+ return ret;
ret = clk_prepare_enable(host->clk_ipg);
if (ret)
@@ -1143,7 +1148,9 @@ static int mxcmci_probe(struct platform_device *pdev)
timer_setup(&host->watchdog, mxcmci_watchdog, 0);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto out_free_dma;
return 0;
@@ -1156,13 +1163,10 @@ out_clk_put:
out_clk_per_put:
clk_disable_unprepare(host->clk_per);
-out_free:
- mmc_free_host(mmc);
-
return ret;
}
-static int mxcmci_remove(struct platform_device *pdev)
+static void mxcmci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct mxcmci_host *host = mmc_priv(mmc);
@@ -1177,13 +1181,8 @@ static int mxcmci_remove(struct platform_device *pdev)
clk_disable_unprepare(host->clk_per);
clk_disable_unprepare(host->clk_ipg);
-
- mmc_free_host(mmc);
-
- return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int mxcmci_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
@@ -1210,9 +1209,8 @@ static int mxcmci_resume(struct device *dev)
return ret;
}
-#endif
-static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
static struct platform_driver mxcmci_driver = {
.probe = mxcmci_probe,
@@ -1220,7 +1218,7 @@ static struct platform_driver mxcmci_driver = {
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &mxcmci_pm_ops,
+ .pm = pm_sleep_ptr(&mxcmci_pm_ops),
.of_match_table = mxcmci_of_match,
}
};
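
The PIO paths above are converted from sg_virt() walks to the sg_mapping_iter API, which maps each scatterlist segment only while it is being accessed (and therefore also works with highmem pages). A condensed read-side sketch, assuming a hypothetical copy_from_fifo() helper:

#include <linux/mmc/core.h>
#include <linux/scatterlist.h>

static void copy_from_fifo(void *buf, size_t len) { /* placeholder for FIFO reads */ }

static void foo_read_data(struct mmc_data *data)
{
	struct sg_mapping_iter sgm;

	/* SG_MITER_TO_SG: the CPU writes into the scatterlist buffers. */
	sg_miter_start(&sgm, data->sg, data->sg_len, SG_MITER_TO_SG);

	while (sg_miter_next(&sgm))
		/* sgm.addr/sgm.length describe the chunk currently mapped. */
		copy_from_fifo(sgm.addr, sgm.length);

	sg_miter_stop(&sgm);	/* always pairs with sg_miter_start() */
}

The iterator must also be stopped on error paths, which is why the mxcmci_transfer_data() hunk funnels failures through a single sg_miter_stop() label.
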
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 947581de7860..7c7c52d9e8e7 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -552,6 +551,11 @@ static const struct of_device_id mxs_mmc_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
+static void mxs_mmc_regulator_disable(void *regulator)
+{
+ regulator_disable(regulator);
+}
+
static int mxs_mmc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -565,7 +569,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
if (irq_err < 0)
return irq_err;
- mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host));
if (!mmc)
return -ENOMEM;
@@ -573,10 +577,8 @@ static int mxs_mmc_probe(struct platform_device *pdev)
ssp = &host->ssp;
ssp->dev = &pdev->dev;
ssp->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(ssp->base)) {
- ret = PTR_ERR(ssp->base);
- goto out_mmc_free;
- }
+ if (IS_ERR(ssp->base))
+ return PTR_ERR(ssp->base);
ssp->devid = (enum mxs_ssp_id)of_device_get_match_data(&pdev->dev);
@@ -586,21 +588,23 @@ static int mxs_mmc_probe(struct platform_device *pdev)
reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
if (!IS_ERR(reg_vmmc)) {
ret = regulator_enable(reg_vmmc);
- if (ret) {
- dev_err(&pdev->dev,
- "Failed to enable vmmc regulator: %d\n", ret);
- goto out_mmc_free;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to enable vmmc regulator\n");
+
+ ret = devm_add_action_or_reset(&pdev->dev, mxs_mmc_regulator_disable,
+ reg_vmmc);
+ if (ret)
+ return ret;
}
ssp->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(ssp->clk)) {
- ret = PTR_ERR(ssp->clk);
- goto out_mmc_free;
- }
+ if (IS_ERR(ssp->clk))
+ return PTR_ERR(ssp->clk);
+
ret = clk_prepare_enable(ssp->clk);
if (ret)
- goto out_mmc_free;
+ return ret;
ret = mxs_mmc_reset(host);
if (ret) {
@@ -659,12 +663,10 @@ out_free_dma:
dma_release_channel(ssp->dmach);
out_clk_disable:
clk_disable_unprepare(ssp->clk);
-out_mmc_free:
- mmc_free_host(mmc);
return ret;
}
-static int mxs_mmc_remove(struct platform_device *pdev)
+static void mxs_mmc_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct mxs_mmc_host *host = mmc_priv(mmc);
@@ -676,13 +678,8 @@ static int mxs_mmc_remove(struct platform_device *pdev)
dma_release_channel(ssp->dmach);
clk_disable_unprepare(ssp->clk);
-
- mmc_free_host(mmc);
-
- return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int mxs_mmc_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
@@ -701,9 +698,8 @@ static int mxs_mmc_resume(struct device *dev)
return clk_prepare_enable(ssp->clk);
}
-#endif
-static SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume);
static struct platform_driver mxs_mmc_driver = {
.probe = mxs_mmc_probe,
@@ -711,7 +707,7 @@ static struct platform_driver mxs_mmc_driver = {
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &mxs_mmc_pm_ops,
+ .pm = pm_sleep_ptr(&mxs_mmc_pm_ops),
.of_match_table = mxs_mmc_dt_ids,
},
};
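
The mxs-mmc hunk above registers a devm action so the vmmc regulator is disabled automatically on probe failure or unbind instead of in a hand-rolled error path. A minimal sketch of the pattern (names other than the devm/regulator APIs are placeholders):

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static void foo_regulator_disable(void *data)
{
	regulator_disable(data);
}

static int foo_enable_vmmc(struct device *dev, struct regulator *vmmc)
{
	int ret;

	ret = regulator_enable(vmmc);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to enable vmmc regulator\n");

	/* Disabled automatically on probe failure or driver unbind. */
	return devm_add_action_or_reset(dev, foo_regulator_disable, vmmc);
}
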
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 3629550528b6..05939f30a5ae 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -19,6 +19,7 @@
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
+MODULE_DESCRIPTION("OpenFirmware bindings for the MMC-over-SPI driver");
MODULE_LICENSE("GPL");
struct of_mmc_spi {
@@ -70,6 +71,10 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
} else {
oms->pdata.caps |= MMC_CAP_NEEDS_POLL;
}
+ if (device_property_read_bool(dev, "cap-sd-highspeed"))
+ oms->pdata.caps |= MMC_CAP_SD_HIGHSPEED;
+ if (device_property_read_bool(dev, "cap-mmc-highspeed"))
+ oms->pdata.caps |= MMC_CAP_MMC_HIGHSPEED;
dev->platform_data = &oms->pdata;
return dev->platform_data;
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 5e5af34090f1..527b89a5ed70 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -26,7 +26,9 @@
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_data/mmc-omap.h>
+#include <linux/workqueue.h>
#define OMAP_MMC_REG_CMD 0x00
@@ -104,13 +106,16 @@ struct mmc_omap_slot {
u16 power_mode;
unsigned int fclk_freq;
- struct tasklet_struct cover_tasklet;
+ struct work_struct cover_bh_work;
struct timer_list cover_timer;
unsigned cover_open;
struct mmc_request *mrq;
struct mmc_omap_host *host;
struct mmc_host *mmc;
+ struct gpio_desc *vsd;
+ struct gpio_desc *vio;
+ struct gpio_desc *cover;
struct omap_mmc_slot_data *pdata;
};
@@ -133,6 +138,7 @@ struct mmc_omap_host {
int irq;
unsigned char bus_mode;
unsigned int reg_shift;
+ struct gpio_desc *slot_switch;
struct work_struct cmd_abort_work;
unsigned abort:1;
@@ -143,10 +149,8 @@ struct mmc_omap_host {
struct work_struct send_stop_work;
struct mmc_data *stop_data;
+ struct sg_mapping_iter sg_miter;
unsigned int sg_len;
- int sg_idx;
- u16 * buffer;
- u32 buffer_bytes_left;
u32 total_bytes_left;
unsigned features;
@@ -210,14 +214,19 @@ static void mmc_omap_select_slot(struct mmc_omap_slot *slot, int claimed)
host->mmc = slot->mmc;
spin_unlock_irqrestore(&host->slot_lock, flags);
no_claim:
- del_timer(&host->clk_timer);
+ timer_delete(&host->clk_timer);
if (host->current_slot != slot || !claimed)
mmc_omap_fclk_offdelay(host->current_slot);
if (host->current_slot != slot) {
OMAP_MMC_WRITE(host, CON, slot->saved_con & 0xFC00);
- if (host->pdata->switch_slot != NULL)
- host->pdata->switch_slot(mmc_dev(slot->mmc), slot->id);
+ if (host->slot_switch)
+ /*
+ * With two slots and a simple GPIO switch, setting
+ * the GPIO to 0 selects slot ID 0, setting it to 1
+ * selects slot ID 1.
+ */
+ gpiod_set_value(host->slot_switch, slot->id);
host->current_slot = slot;
}
@@ -264,7 +273,7 @@ static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
/* Keeps clock running for at least 8 cycles on valid freq */
mod_timer(&host->clk_timer, jiffies + HZ/10);
else {
- del_timer(&host->clk_timer);
+ timer_delete(&host->clk_timer);
mmc_omap_fclk_offdelay(slot);
mmc_omap_fclk_enable(host, 0);
}
@@ -297,6 +306,9 @@ static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
static inline
int mmc_omap_cover_is_open(struct mmc_omap_slot *slot)
{
+ /* If we have a GPIO then use that */
+ if (slot->cover)
+ return gpiod_get_value(slot->cover);
if (slot->pdata->get_cover_state)
return slot->pdata->get_cover_state(mmc_dev(slot->mmc),
slot->id);
@@ -314,7 +326,7 @@ mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr,
"closed");
}
-static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
+static DEVICE_ATTR(cover_switch, 0444, mmc_omap_show_cover_switch, NULL);
static ssize_t
mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr,
@@ -326,7 +338,7 @@ mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%s\n", slot->pdata->name);
}
-static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL);
+static DEVICE_ATTR(slot_name, 0444, mmc_omap_show_slot_name, NULL);
static void
mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
@@ -443,6 +455,8 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
{
if (host->dma_in_use)
mmc_omap_release_dma(host, data, data->error);
+ else
+ sg_miter_stop(&host->sg_miter);
host->data = NULL;
host->sg_len = 0;
@@ -550,7 +564,7 @@ mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
{
host->cmd = NULL;
- del_timer(&host->cmd_abort_timer);
+ timer_delete(&host->cmd_abort_timer);
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
@@ -625,7 +639,8 @@ static void mmc_omap_abort_command(struct work_struct *work)
static void
mmc_omap_cmd_timer(struct timer_list *t)
{
- struct mmc_omap_host *host = from_timer(host, t, cmd_abort_timer);
+ struct mmc_omap_host *host = timer_container_of(host, t,
+ cmd_abort_timer);
unsigned long flags;
spin_lock_irqsave(&host->slot_lock, flags);
@@ -638,23 +653,10 @@ mmc_omap_cmd_timer(struct timer_list *t)
spin_unlock_irqrestore(&host->slot_lock, flags);
}
-/* PIO only */
-static void
-mmc_omap_sg_to_buf(struct mmc_omap_host *host)
-{
- struct scatterlist *sg;
-
- sg = host->data->sg + host->sg_idx;
- host->buffer_bytes_left = sg->length;
- host->buffer = sg_virt(sg);
- if (host->buffer_bytes_left > host->total_bytes_left)
- host->buffer_bytes_left = host->total_bytes_left;
-}
-
static void
mmc_omap_clk_timer(struct timer_list *t)
{
- struct mmc_omap_host *host = from_timer(host, t, clk_timer);
+ struct mmc_omap_host *host = timer_container_of(host, t, clk_timer);
mmc_omap_fclk_enable(host, 0);
}
@@ -663,33 +665,37 @@ mmc_omap_clk_timer(struct timer_list *t)
static void
mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
int n, nwords;
+ u16 *buffer;
- if (host->buffer_bytes_left == 0) {
- host->sg_idx++;
- BUG_ON(host->sg_idx == host->sg_len);
- mmc_omap_sg_to_buf(host);
+ if (!sg_miter_next(sgm)) {
+ /* This should not happen */
+ dev_err(mmc_dev(host->mmc), "ran out of scatterlist prematurely\n");
+ return;
}
+ buffer = sgm->addr;
+
n = 64;
- if (n > host->buffer_bytes_left)
- n = host->buffer_bytes_left;
+ if (n > sgm->length)
+ n = sgm->length;
+ if (n > host->total_bytes_left)
+ n = host->total_bytes_left;
/* Round up to handle odd number of bytes to transfer */
nwords = DIV_ROUND_UP(n, 2);
- host->buffer_bytes_left -= n;
+ sgm->consumed = n;
host->total_bytes_left -= n;
host->data->bytes_xfered += n;
if (write) {
__raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA),
- host->buffer, nwords);
+ buffer, nwords);
} else {
__raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA),
- host->buffer, nwords);
+ buffer, nwords);
}
-
- host->buffer += nwords;
}
#ifdef CONFIG_MMC_DEBUG
@@ -831,7 +837,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
}
if (cmd_error && host->data) {
- del_timer(&host->cmd_abort_timer);
+ timer_delete(&host->cmd_abort_timer);
host->abort = 1;
OMAP_MMC_WRITE(host, IE, 0);
disable_irq_nosync(host->irq);
@@ -869,18 +875,18 @@ void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
sysfs_notify(&slot->mmc->class_dev.kobj, NULL, "cover_switch");
}
- tasklet_hi_schedule(&slot->cover_tasklet);
+ queue_work(system_bh_highpri_wq, &slot->cover_bh_work);
}
static void mmc_omap_cover_timer(struct timer_list *t)
{
- struct mmc_omap_slot *slot = from_timer(slot, t, cover_timer);
- tasklet_schedule(&slot->cover_tasklet);
+ struct mmc_omap_slot *slot = timer_container_of(slot, t, cover_timer);
+ queue_work(system_bh_wq, &slot->cover_bh_work);
}
-static void mmc_omap_cover_handler(struct tasklet_struct *t)
+static void mmc_omap_cover_bh_handler(struct work_struct *t)
{
- struct mmc_omap_slot *slot = from_tasklet(slot, t, cover_tasklet);
+ struct mmc_omap_slot *slot = from_work(slot, t, cover_bh_work);
int cover_open = mmc_omap_cover_is_open(slot);
mmc_detect_change(slot->mmc, 0);
@@ -943,6 +949,7 @@ static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_reque
static void
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
+ unsigned int miter_flags = SG_MITER_ATOMIC; /* Used from IRQ */
struct mmc_data *data = req->data;
int i, use_dma = 1, block_size;
struct scatterlist *sg;
@@ -977,7 +984,6 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
}
}
- host->sg_idx = 0;
if (use_dma) {
enum dma_data_direction dma_data_dir;
struct dma_async_tx_descriptor *tx;
@@ -1058,7 +1064,11 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
OMAP_MMC_WRITE(host, BUF, 0x1f1f);
host->total_bytes_left = data->blocks * block_size;
host->sg_len = sg_len;
- mmc_omap_sg_to_buf(host);
+ if (data->flags & MMC_DATA_READ)
+ miter_flags |= SG_MITER_TO_SG;
+ else
+ miter_flags |= SG_MITER_FROM_SG;
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, miter_flags);
host->dma_in_use = 0;
}
@@ -1106,6 +1116,26 @@ static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
host = slot->host;
+ if (power_on) {
+ if (slot->vsd) {
+ gpiod_set_value(slot->vsd, power_on);
+ msleep(1);
+ }
+ if (slot->vio) {
+ gpiod_set_value(slot->vio, power_on);
+ msleep(1);
+ }
+ } else {
+ if (slot->vio) {
+ gpiod_set_value(slot->vio, power_on);
+ msleep(50);
+ }
+ if (slot->vsd) {
+ gpiod_set_value(slot->vsd, power_on);
+ msleep(50);
+ }
+ }
+
if (slot->pdata->set_power != NULL)
slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
vdd);
@@ -1229,7 +1259,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
struct mmc_host *mmc;
int r;
- mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev);
+ mmc = devm_mmc_alloc_host(host->dev, sizeof(*slot));
if (mmc == NULL)
return -ENOMEM;
@@ -1240,6 +1270,25 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
slot->power_mode = MMC_POWER_UNDEFINED;
slot->pdata = &host->pdata->slots[id];
+ /* Check for some optional GPIO controls */
+ slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd",
+ id, GPIOD_OUT_LOW);
+ if (IS_ERR(slot->vsd))
+ return dev_err_probe(host->dev, PTR_ERR(slot->vsd),
+ "error looking up VSD GPIO\n");
+
+ slot->vio = devm_gpiod_get_index_optional(host->dev, "vio",
+ id, GPIOD_OUT_LOW);
+ if (IS_ERR(slot->vio))
+ return dev_err_probe(host->dev, PTR_ERR(slot->vio),
+ "error looking up VIO GPIO\n");
+
+ slot->cover = devm_gpiod_get_index_optional(host->dev, "cover",
+ id, GPIOD_IN);
+ if (IS_ERR(slot->cover))
+ return dev_err_probe(host->dev, PTR_ERR(slot->cover),
+ "error looking up cover switch GPIO\n");
+
host->slots[id] = slot;
mmc->caps = 0;
@@ -1269,7 +1318,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
if (slot->pdata->get_cover_state != NULL) {
timer_setup(&slot->cover_timer, mmc_omap_cover_timer, 0);
- tasklet_setup(&slot->cover_tasklet, mmc_omap_cover_handler);
+ INIT_WORK(&slot->cover_bh_work, mmc_omap_cover_bh_handler);
}
r = mmc_add_host(mmc);
@@ -1288,7 +1337,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
&dev_attr_cover_switch);
if (r < 0)
goto err_remove_slot_name;
- tasklet_schedule(&slot->cover_tasklet);
+ queue_work(system_bh_wq, &slot->cover_bh_work);
}
return 0;
@@ -1298,7 +1347,6 @@ err_remove_slot_name:
device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
err_remove_host:
mmc_remove_host(mmc);
- mmc_free_host(mmc);
return r;
}
@@ -1311,12 +1359,11 @@ static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
if (slot->pdata->get_cover_state != NULL)
device_remove_file(&mmc->class_dev, &dev_attr_cover_switch);
- tasklet_kill(&slot->cover_tasklet);
- del_timer_sync(&slot->cover_timer);
+ cancel_work_sync(&slot->cover_bh_work);
+ timer_delete_sync(&slot->cover_timer);
flush_workqueue(slot->host->mmc_omap_wq);
mmc_remove_host(mmc);
- mmc_free_host(mmc);
}
static int mmc_omap_probe(struct platform_device *pdev)
@@ -1343,10 +1390,9 @@ static int mmc_omap_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return -ENXIO;
+ return irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->virt_base = devm_ioremap_resource(&pdev->dev, res);
+ host->virt_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(host->virt_base))
return PTR_ERR(host->virt_base);
@@ -1368,13 +1414,19 @@ static int mmc_omap_probe(struct platform_device *pdev)
host->dev = &pdev->dev;
platform_set_drvdata(pdev, host);
+ host->slot_switch = devm_gpiod_get_optional(host->dev, "switch",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(host->slot_switch))
+ return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
+ "error looking up slot switch GPIO\n");
+
host->id = pdev->id;
host->irq = irq;
host->phys_base = res->start;
host->iclk = clk_get(&pdev->dev, "ick");
if (IS_ERR(host->iclk))
return PTR_ERR(host->iclk);
- clk_enable(host->iclk);
+ clk_prepare_enable(host->iclk);
host->fclk = clk_get(&pdev->dev, "fck");
if (IS_ERR(host->fclk)) {
@@ -1382,16 +1434,18 @@ static int mmc_omap_probe(struct platform_device *pdev)
goto err_free_iclk;
}
+ ret = clk_prepare(host->fclk);
+ if (ret)
+ goto err_put_fclk;
+
host->dma_tx_burst = -1;
host->dma_rx_burst = -1;
host->dma_tx = dma_request_chan(&pdev->dev, "tx");
if (IS_ERR(host->dma_tx)) {
ret = PTR_ERR(host->dma_tx);
- if (ret == -EPROBE_DEFER) {
- clk_put(host->fclk);
- goto err_free_iclk;
- }
+ if (ret == -EPROBE_DEFER)
+ goto err_free_fclk;
host->dma_tx = NULL;
dev_warn(host->dev, "TX DMA channel request failed\n");
@@ -1403,8 +1457,7 @@ static int mmc_omap_probe(struct platform_device *pdev)
if (ret == -EPROBE_DEFER) {
if (host->dma_tx)
dma_release_channel(host->dma_tx);
- clk_put(host->fclk);
- goto err_free_iclk;
+ goto err_free_fclk;
}
host->dma_rx = NULL;
@@ -1424,7 +1477,7 @@ static int mmc_omap_probe(struct platform_device *pdev)
host->nr_slots = pdata->nr_slots;
host->reg_shift = (mmc_omap7xx() ? 1 : 2);
- host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
+ host->mmc_omap_wq = alloc_workqueue("mmc_omap", WQ_PERCPU, 0);
if (!host->mmc_omap_wq) {
ret = -ENOMEM;
goto err_plat_cleanup;
@@ -1454,14 +1507,17 @@ err_free_dma:
dma_release_channel(host->dma_tx);
if (host->dma_rx)
dma_release_channel(host->dma_rx);
+err_free_fclk:
+ clk_unprepare(host->fclk);
+err_put_fclk:
clk_put(host->fclk);
err_free_iclk:
- clk_disable(host->iclk);
+ clk_disable_unprepare(host->iclk);
clk_put(host->iclk);
return ret;
}
-static int mmc_omap_remove(struct platform_device *pdev)
+static void mmc_omap_remove(struct platform_device *pdev)
{
struct mmc_omap_host *host = platform_get_drvdata(pdev);
int i;
@@ -1476,8 +1532,9 @@ static int mmc_omap_remove(struct platform_device *pdev)
mmc_omap_fclk_enable(host, 0);
free_irq(host->irq, host);
+ clk_unprepare(host->fclk);
clk_put(host->fclk);
- clk_disable(host->iclk);
+ clk_disable_unprepare(host->iclk);
clk_put(host->iclk);
if (host->dma_tx)
@@ -1486,8 +1543,6 @@ static int mmc_omap_remove(struct platform_device *pdev)
dma_release_channel(host->dma_rx);
destroy_workqueue(host->mmc_omap_wq);
-
- return 0;
}
#if IS_BUILTIN(CONFIG_OF)
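
The cover-switch handling above moves from a tasklet to a work item queued on the bottom-half workqueues (system_bh_wq / system_bh_highpri_wq), which still run the handler in softirq context but through the regular workqueue API. A minimal sketch of the pattern with placeholder names:

#include <linux/workqueue.h>

struct foo_slot {
	struct work_struct cover_bh_work;
	bool cover_open;
};

static void foo_cover_bh_handler(struct work_struct *t)
{
	struct foo_slot *slot = from_work(slot, t, cover_bh_work);

	slot->cover_open = true;	/* placeholder for the real state update */
}

static void foo_slot_init(struct foo_slot *slot)
{
	INIT_WORK(&slot->cover_bh_work, foo_cover_bh_handler);
	queue_work(system_bh_wq, &slot->cover_bh_work);
}

Teardown uses cancel_work_sync() where tasklet_kill() was used before.
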
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 2f8038d69f67..58c881f2725b 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -620,8 +620,6 @@ static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
}
-#ifdef CONFIG_PM
-
/*
* Restore the MMC host context, if it was lost as result of a
* power state change.
@@ -689,6 +687,7 @@ out:
return 0;
}
+#ifdef CONFIG_PM
/*
* Save the MMC host context (store the number of power state changes so far).
*/
@@ -702,11 +701,6 @@ static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
#else
-static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
-{
- return 0;
-}
-
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
}
@@ -752,7 +746,7 @@ omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%s\n", mmc_pdata(host)->name);
}
-static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL);
+static DEVICE_ATTR(slot_name, 0444, omap_hsmmc_show_slot_name, NULL);
/*
* Configure the response type and send the cmd.
@@ -1504,41 +1498,6 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
omap_hsmmc_set_bus_mode(host);
}
-static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
-{
- struct omap_hsmmc_host *host = mmc_priv(mmc);
-
- if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
- struct device_node *np = mmc_dev(mmc)->of_node;
-
- /*
- * REVISIT: should be moved to sdio core and made more
- * general e.g. by expanding the DT bindings of child nodes
- * to provide a mechanism to provide this information:
- * Documentation/devicetree/bindings/mmc/mmc-card.txt
- */
-
- np = of_get_compatible_child(np, "ti,wl1251");
- if (np) {
- /*
- * We have TI wl1251 attached to MMC3. Pass this
- * information to the SDIO core because it can't be
- * probed by normal methods.
- */
-
- dev_info(host->dev, "found wl1251\n");
- card->quirks |= MMC_QUIRK_NONSTD_SDIO;
- card->cccr.wide_bus = 1;
- card->cis.vendor = 0x104c;
- card->cis.device = 0x9066;
- card->cis.blksize = 512;
- card->cis.max_dtr = 24000000;
- card->ocr = 0x80;
- of_node_put(np);
- }
- }
-}
-
static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
@@ -1665,7 +1624,6 @@ static struct mmc_host_ops omap_hsmmc_ops = {
.set_ios = omap_hsmmc_set_ios,
.get_cd = mmc_gpio_get_cd,
.get_ro = mmc_gpio_get_ro,
- .init_card = omap_hsmmc_init_card,
.enable_sdio_irq = omap_hsmmc_enable_sdio_irq,
};
@@ -1704,7 +1662,6 @@ static int mmc_regs_show(struct seq_file *s, void *data)
seq_printf(s, "CAPA:\t\t0x%08x\n",
OMAP_HSMMC_READ(host->base, CAPA));
- pm_runtime_mark_last_busy(host->dev);
pm_runtime_put_autosuspend(host->dev);
return 0;
@@ -1715,7 +1672,7 @@ DEFINE_SHOW_ATTRIBUTE(mmc_regs);
static void omap_hsmmc_debugfs(struct mmc_host *mmc)
{
if (mmc->debugfs_root)
- debugfs_create_file("regs", S_IRUSR, mmc->debugfs_root,
+ debugfs_create_file("regs", 0400, mmc->debugfs_root,
mmc, &mmc_regs_fops);
}
@@ -1777,18 +1734,18 @@ static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
if (legacy && legacy->name)
pdata->name = legacy->name;
- if (of_find_property(np, "ti,dual-volt", NULL))
+ if (of_property_read_bool(np, "ti,dual-volt"))
pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;
- if (of_find_property(np, "ti,non-removable", NULL)) {
+ if (of_property_read_bool(np, "ti,non-removable")) {
pdata->nonremovable = true;
pdata->no_regulator_off_init = true;
}
- if (of_find_property(np, "ti,needs-special-reset", NULL))
+ if (of_property_read_bool(np, "ti,needs-special-reset"))
pdata->features |= HSMMC_HAS_UPDATED_RESET;
- if (of_find_property(np, "ti,needs-special-hs-handling", NULL))
+ if (of_property_read_bool(np, "ti,needs-special-hs-handling"))
pdata->features |= HSMMC_HAS_HSPE_SUPPORT;
return pdata;
@@ -1831,24 +1788,21 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
return -ENXIO;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (res == NULL || irq < 0)
- return -ENXIO;
+ if (irq < 0)
+ return irq;
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
- mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev);
- if (!mmc) {
- ret = -ENOMEM;
- goto err;
- }
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host));
+ if (!mmc)
+ return -ENOMEM;
ret = mmc_of_parse(mmc);
if (ret)
- goto err1;
+ return ret;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -1884,7 +1838,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
if (IS_ERR(host->fclk)) {
ret = PTR_ERR(host->fclk);
host->fclk = NULL;
- goto err1;
+ return ret;
}
if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
@@ -1987,7 +1941,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
if (!ret)
mmc->caps |= MMC_CAP_SDIO_IRQ;
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto err_irq;
if (mmc_pdata(host)->name != NULL) {
ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
@@ -1996,7 +1952,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
}
omap_hsmmc_debugfs(mmc);
- pm_runtime_mark_last_busy(host->dev);
pm_runtime_put_autosuspend(host->dev);
return 0;
@@ -2013,13 +1968,10 @@ err_irq:
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
clk_disable_unprepare(host->dbclk);
-err1:
- mmc_free_host(mmc);
-err:
return ret;
}
-static int omap_hsmmc_remove(struct platform_device *pdev)
+static void omap_hsmmc_remove(struct platform_device *pdev)
{
struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
@@ -2035,13 +1987,8 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
pm_runtime_disable(host->dev);
device_init_wakeup(&pdev->dev, false);
clk_disable_unprepare(host->dbclk);
-
- mmc_free_host(host->mmc);
-
- return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int omap_hsmmc_suspend(struct device *dev)
{
struct omap_hsmmc_host *host = dev_get_drvdata(dev);
@@ -2080,11 +2027,9 @@ static int omap_hsmmc_resume(struct device *dev)
if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
omap_hsmmc_conf_bus_power(host);
- pm_runtime_mark_last_busy(host->dev);
pm_runtime_put_autosuspend(host->dev);
return 0;
}
-#endif
static int omap_hsmmc_runtime_suspend(struct device *dev)
{
@@ -2155,9 +2100,8 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(omap_hsmmc_suspend, omap_hsmmc_resume)
- .runtime_suspend = omap_hsmmc_runtime_suspend,
- .runtime_resume = omap_hsmmc_runtime_resume,
+ SYSTEM_SLEEP_PM_OPS(omap_hsmmc_suspend, omap_hsmmc_resume)
+ RUNTIME_PM_OPS(omap_hsmmc_runtime_suspend, omap_hsmmc_runtime_resume, NULL)
};
static struct platform_driver omap_hsmmc_driver = {
@@ -2166,7 +2110,7 @@ static struct platform_driver omap_hsmmc_driver = {
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &omap_hsmmc_dev_pm_ops,
+ .pm = pm_ptr(&omap_hsmmc_dev_pm_ops),
.of_match_table = of_match_ptr(omap_mmc_of_match),
},
};
diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
index 3dc143b03939..dc585726b66e 100644
--- a/drivers/mmc/host/owl-mmc.c
+++ b/drivers/mmc/host/owl-mmc.c
@@ -16,8 +16,9 @@
#include <linux/interrupt.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
@@ -566,7 +567,7 @@ static int owl_mmc_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- mmc = mmc_alloc_host(sizeof(struct owl_mmc_host), &pdev->dev);
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*owl_host));
if (!mmc) {
dev_err(&pdev->dev, "mmc alloc host failed\n");
return -ENOMEM;
@@ -578,26 +579,19 @@ static int owl_mmc_probe(struct platform_device *pdev)
owl_host->mmc = mmc;
spin_lock_init(&owl_host->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- owl_host->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(owl_host->base)) {
- ret = PTR_ERR(owl_host->base);
- goto err_free_host;
- }
+ owl_host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(owl_host->base))
+ return PTR_ERR(owl_host->base);
owl_host->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(owl_host->clk)) {
- dev_err(&pdev->dev, "No clock defined\n");
- ret = PTR_ERR(owl_host->clk);
- goto err_free_host;
- }
+ if (IS_ERR(owl_host->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(owl_host->clk),
+ "No clock defined\n");
owl_host->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(owl_host->reset)) {
- dev_err(&pdev->dev, "Could not get reset control\n");
- ret = PTR_ERR(owl_host->reset);
- goto err_free_host;
- }
+ if (IS_ERR(owl_host->reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(owl_host->reset),
+ "Could not get reset control\n");
mmc->ops = &owl_mmc_ops;
mmc->max_blk_count = 512;
@@ -616,16 +610,14 @@ static int owl_mmc_probe(struct platform_device *pdev)
ret = mmc_of_parse(mmc);
if (ret)
- goto err_free_host;
+ return ret;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
owl_host->dma = dma_request_chan(&pdev->dev, "mmc");
- if (IS_ERR(owl_host->dma)) {
- dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
- ret = PTR_ERR(owl_host->dma);
- goto err_free_host;
- }
+ if (IS_ERR(owl_host->dma))
+ return dev_err_probe(&pdev->dev, PTR_ERR(owl_host->dma),
+ "Failed to get external DMA channel.\n");
dev_info(&pdev->dev, "Using %s for DMA transfers\n",
dma_chan_name(owl_host->dma));
@@ -638,7 +630,7 @@ static int owl_mmc_probe(struct platform_device *pdev)
owl_host->irq = platform_get_irq(pdev, 0);
if (owl_host->irq < 0) {
- ret = -EINVAL;
+ ret = owl_host->irq;
goto err_release_channel;
}
@@ -662,13 +654,11 @@ static int owl_mmc_probe(struct platform_device *pdev)
err_release_channel:
dma_release_channel(owl_host->dma);
-err_free_host:
- mmc_free_host(mmc);
return ret;
}
-static int owl_mmc_remove(struct platform_device *pdev)
+static void owl_mmc_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct owl_mmc_host *owl_host = mmc_priv(mmc);
@@ -676,9 +666,6 @@ static int owl_mmc_remove(struct platform_device *pdev)
mmc_remove_host(mmc);
disable_irq(owl_host->irq);
dma_release_channel(owl_host->dma);
- mmc_free_host(mmc);
-
- return 0;
}
static const struct of_device_id owl_mmc_of_match[] = {
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 316393c694d7..b5ea058ed467 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -30,11 +30,10 @@
#include <linux/gpio/consumer.h>
#include <linux/gfp.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/soc/pxa/cpu.h>
#include <linux/sizes.h>
-#include <mach/hardware.h>
#include <linux/platform_data/mmc-pxamci.h>
#include "pxamci.h"
@@ -612,16 +611,13 @@ static int pxamci_probe(struct platform_device *pdev)
struct resource *r;
int ret, irq;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- mmc = mmc_alloc_host(sizeof(struct pxamci_host), dev);
- if (!mmc) {
- ret = -ENOMEM;
- goto out;
- }
+ mmc = devm_mmc_alloc_host(dev, sizeof(*host));
+ if (!mmc)
+ return -ENOMEM;
mmc->ops = &pxamci_ops;
@@ -656,11 +652,9 @@ static int pxamci_probe(struct platform_device *pdev)
host->clkrt = CLKRT_OFF;
host->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(host->clk)) {
- ret = PTR_ERR(host->clk);
- host->clk = NULL;
- goto out;
- }
+ if (IS_ERR(host->clk))
+ return dev_err_probe(dev, PTR_ERR(host->clk),
+ "Failed to acquire clock\n");
host->clkrate = clk_get_rate(host->clk);
@@ -685,14 +679,12 @@ static int pxamci_probe(struct platform_device *pdev)
}
spin_lock_init(&host->lock);
- host->res = r;
host->imask = MMC_I_MASK_ALL;
- host->base = devm_ioremap_resource(dev, r);
- if (IS_ERR(host->base)) {
- ret = PTR_ERR(host->base);
- goto out;
- }
+ host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
+ if (IS_ERR(host->base))
+ return PTR_ERR(host->base);
+ host->res = r;
/*
* Ensure that the host controller is shut down, and setup
@@ -706,51 +698,41 @@ static int pxamci_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, pxamci_irq, 0,
DRIVER_NAME, host);
if (ret)
- goto out;
+ return ret;
platform_set_drvdata(pdev, mmc);
- host->dma_chan_rx = dma_request_chan(dev, "rx");
- if (IS_ERR(host->dma_chan_rx)) {
- dev_err(dev, "unable to request rx dma channel\n");
- ret = PTR_ERR(host->dma_chan_rx);
- host->dma_chan_rx = NULL;
- goto out;
- }
+ host->dma_chan_rx = devm_dma_request_chan(dev, "rx");
+ if (IS_ERR(host->dma_chan_rx))
+ return dev_err_probe(dev, PTR_ERR(host->dma_chan_rx),
+ "unable to request rx dma channel\n");
- host->dma_chan_tx = dma_request_chan(dev, "tx");
- if (IS_ERR(host->dma_chan_tx)) {
- dev_err(dev, "unable to request tx dma channel\n");
- ret = PTR_ERR(host->dma_chan_tx);
- host->dma_chan_tx = NULL;
- goto out;
- }
+
+ host->dma_chan_tx = devm_dma_request_chan(dev, "tx");
+ if (IS_ERR(host->dma_chan_tx))
+ return dev_err_probe(dev, PTR_ERR(host->dma_chan_tx),
+ "unable to request tx dma channel\n");
if (host->pdata) {
host->detect_delay_ms = host->pdata->detect_delay_ms;
host->power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
- if (IS_ERR(host->power)) {
- ret = PTR_ERR(host->power);
- dev_err(dev, "Failed requesting gpio_power\n");
- goto out;
- }
+ if (IS_ERR(host->power))
+ return dev_err_probe(dev, PTR_ERR(host->power),
+ "Failed requesting gpio_power\n");
/* FIXME: should we pass detection delay to debounce? */
ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
- if (ret && ret != -ENOENT) {
- dev_err(dev, "Failed requesting gpio_cd\n");
- goto out;
- }
+ if (ret && ret != -ENOENT)
+ return dev_err_probe(dev, ret, "Failed requesting gpio_cd\n");
if (!host->pdata->gpio_card_ro_invert)
mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
- if (ret && ret != -ENOENT) {
- dev_err(dev, "Failed requesting gpio_ro\n");
- goto out;
- }
+ if (ret && ret != -ENOENT)
+ return dev_err_probe(dev, ret, "Failed requesting gpio_ro\n");
+
if (!ret)
host->use_ro_gpio = true;
@@ -763,23 +745,16 @@ static int pxamci_probe(struct platform_device *pdev)
dev_warn(dev, "gpio_ro and get_ro() both defined\n");
}
- mmc_add_host(mmc);
-
- return 0;
-
-out:
- if (host) {
- if (host->dma_chan_rx)
- dma_release_channel(host->dma_chan_rx);
- if (host->dma_chan_tx)
- dma_release_channel(host->dma_chan_tx);
+ ret = mmc_add_host(mmc);
+ if (ret) {
+ if (host->pdata && host->pdata->exit)
+ host->pdata->exit(dev, mmc);
}
- if (mmc)
- mmc_free_host(mmc);
+
return ret;
}
-static int pxamci_remove(struct platform_device *pdev)
+static void pxamci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
@@ -798,13 +773,7 @@ static int pxamci_remove(struct platform_device *pdev)
dmaengine_terminate_all(host->dma_chan_rx);
dmaengine_terminate_all(host->dma_chan_tx);
- dma_release_channel(host->dma_chan_rx);
- dma_release_channel(host->dma_chan_tx);
-
- mmc_free_host(mmc);
}
-
- return 0;
}
static struct platform_driver pxamci_driver = {
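
The probe rewrite above leans on dev_err_probe(), which logs the failure (quietly recording -EPROBE_DEFER instead of printing it) and returns the error code, so each goto-based cleanup collapses into a single return. A minimal sketch of the idiom with a placeholder clock consumer:

#include <linux/clk.h>
#include <linux/dev_printk.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk))
		/* Prints unless the error is -EPROBE_DEFER; returns PTR_ERR(clk). */
		return dev_err_probe(dev, PTR_ERR(clk),
				     "Failed to acquire clock\n");

	return 0;
}
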
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index 53eded81a53e..afc36a407c2c 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -9,7 +9,10 @@
#ifndef RENESAS_SDHI_H
#define RENESAS_SDHI_H
+#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/platform_device.h>
+#include <linux/workqueue.h>
#include "tmio_mmc.h"
struct renesas_sdhi_scc {
@@ -18,6 +21,8 @@ struct renesas_sdhi_scc {
u32 tap_hs400_4tap; /* sampling clock position for HS400 (4 TAP) */
};
+#define SDHI_FLAG_NEED_CLKH_FALLBACK BIT(0)
+
struct renesas_sdhi_of_data {
unsigned long tmio_flags;
u32 tmio_ocr_mask;
@@ -31,30 +36,48 @@ struct renesas_sdhi_of_data {
int taps_num;
unsigned int max_blk_count;
unsigned short max_segs;
+ unsigned long sdhi_flags;
};
#define SDHI_CALIB_TABLE_MAX 32
+#define sdhi_has_quirk(p, q) ((p)->quirks && (p)->quirks->q)
+
struct renesas_sdhi_quirks {
bool hs400_disabled;
bool hs400_4taps;
+ bool fixed_addr_mode;
+ bool dma_one_rx_only;
+ bool manual_tap_correction;
+ bool old_info1_layout;
u32 hs400_bad_taps;
const u8 (*hs400_calib_table)[SDHI_CALIB_TABLE_MAX];
};
-struct tmio_mmc_dma {
+struct renesas_sdhi_of_data_with_quirks {
+ const struct renesas_sdhi_of_data *of_data;
+ const struct renesas_sdhi_quirks *quirks;
+};
+
+/* We want both end_flags to be set before we mark DMA as finished */
+#define SDHI_DMA_END_FLAG_DMA 0
+#define SDHI_DMA_END_FLAG_ACCESS 1
+
+struct renesas_sdhi_dma {
+ unsigned long end_flags;
enum dma_slave_buswidth dma_buswidth;
- bool (*filter)(struct dma_chan *chan, void *arg);
+ dma_filter_fn filter;
void (*enable)(struct tmio_mmc_host *host, bool enable);
- struct completion dma_dataend;
- struct tasklet_struct dma_complete;
+ struct completion dma_dataend;
+ struct work_struct dma_complete;
};
struct renesas_sdhi {
struct clk *clk;
+ struct clk *clkh;
struct clk *clk_cd;
struct tmio_mmc_data mmc_data;
- struct tmio_mmc_dma dma_priv;
+ struct renesas_sdhi_dma dma_priv;
const struct renesas_sdhi_quirks *quirks;
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default, *pins_uhs;
@@ -63,6 +86,7 @@ struct renesas_sdhi {
u32 scc_tappos_hs400;
const u8 *adjust_hs400_calib_table;
bool needs_adjust_hs400;
+ bool card_is_sdio;
/* Tuning values: 1 for success, 0 for failure */
DECLARE_BITMAP(taps, BITS_PER_LONG);
@@ -72,12 +96,18 @@ struct renesas_sdhi {
unsigned int tap_set;
struct reset_control *rstc;
+ struct tmio_mmc_host *host;
+ struct regulator_dev *rdev;
};
#define host_to_priv(host) \
container_of((host)->pdata, struct renesas_sdhi, mmc_data)
int renesas_sdhi_probe(struct platform_device *pdev,
- const struct tmio_mmc_dma_ops *dma_ops);
-int renesas_sdhi_remove(struct platform_device *pdev);
+ const struct tmio_mmc_dma_ops *dma_ops,
+ const struct renesas_sdhi_of_data *of_data,
+ const struct renesas_sdhi_quirks *quirks);
+void renesas_sdhi_remove(struct platform_device *pdev);
+int renesas_sdhi_suspend(struct device *dev);
+int renesas_sdhi_resume(struct device *dev);
#endif
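
The two SDHI_DMA_END_FLAG_* bits above express that a transfer is only considered finished once both the DMA engine and the SDHI access-end interrupt have reported completion; the actual handling lives in the internal-DMAC backend, which is not part of this hunk. A rough illustration of how such a flag pair could be combined, with a hypothetical completion callback:

#include <linux/bitops.h>

/* Called from either the DMA-complete or the access-end path. */
static void foo_mark_end(struct renesas_sdhi_dma *dma, int flag,
			 void (*complete)(struct renesas_sdhi_dma *dma))
{
	set_bit(flag, &dma->end_flags);

	/* Finish only once both SDHI_DMA_END_FLAG_* bits are set. */
	if (test_bit(SDHI_DMA_END_FLAG_DMA, &dma->end_flags) &&
	    test_bit(SDHI_DMA_END_FLAG_ACCESS, &dma->end_flags))
		complete(dma);
}
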
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index e49ca0f7fe9a..2a310a145785 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -22,21 +22,22 @@
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
-#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinctrl-state.h>
+#include <linux/platform_data/tmio.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/reset.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
-#include <linux/sys_soc.h>
#include "renesas_sdhi.h"
#include "tmio_mmc.h"
@@ -51,9 +52,6 @@
#define HOST_MODE_GEN3_32BIT (HOST_MODE_GEN3_WMODE | HOST_MODE_GEN3_BUSWIDTH)
#define HOST_MODE_GEN3_64BIT 0
-#define CTL_SDIF_MODE 0xe6
-#define SDIF_MODE_HS400 BIT(0)
-
#define SDHI_VER_GEN2_SDR50 0x490c
#define SDHI_VER_RZ_A1 0x820b
/* very old datasheets said 0x490c for SDR104, too. They are wrong! */
@@ -127,10 +125,13 @@ static int renesas_sdhi_clk_enable(struct tmio_mmc_host *host)
}
static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
- unsigned int new_clock)
+ unsigned int wanted_clock)
{
struct renesas_sdhi *priv = host_to_priv(host);
+ struct clk *ref_clk = priv->clk;
unsigned int freq, diff, best_freq = 0, diff_min = ~0;
+ unsigned int new_clock, clkh_shift = 0;
+ unsigned int new_upper_limit;
int i;
/*
@@ -141,19 +142,35 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2) || mmc_doing_tune(host->mmc))
return clk_get_rate(priv->clk);
+ if (priv->clkh) {
+ /* HS400 with 4TAP needs different clock settings */
+ bool use_4tap = sdhi_has_quirk(priv, hs400_4taps);
+ bool need_slow_clkh = host->mmc->ios.timing == MMC_TIMING_MMC_HS400;
+ clkh_shift = use_4tap && need_slow_clkh ? 1 : 2;
+ ref_clk = priv->clkh;
+ }
+
+ new_clock = wanted_clock << clkh_shift;
+
/*
* We want the bus clock to be as close as possible to, but no
* greater than, new_clock. As we can divide by 1 << i for
* any i in [0, 9] we want the input clock to be as close as
* possible, but no greater than, new_clock << i.
+ *
+ * Allow an upper limit 1/1024 above the requested rate, to keep the
+ * clk rate from jumping to a lower rate due to rounding error (e.g. RZ/G2L
+ * has 3 clk sources: 533.333333 MHz, 400 MHz and 266.666666 MHz. A request
+ * for 533.333333 MHz would select the slower 400 MHz source because of
+ * rounding: 533333333 Hz / 4 * 4 = 533333332 Hz < 533333333 Hz).
*/
for (i = min(9, ilog2(UINT_MAX / new_clock)); i >= 0; i--) {
- freq = clk_round_rate(priv->clk, new_clock << i);
- if (freq > (new_clock << i)) {
+ freq = clk_round_rate(ref_clk, new_clock << i);
+ new_upper_limit = (new_clock << i) + ((new_clock << i) >> 10);
+ if (freq > new_upper_limit) {
/* Too fast; look for a slightly slower option */
- freq = clk_round_rate(priv->clk,
- (new_clock << i) / 4 * 3);
- if (freq > (new_clock << i))
+ freq = clk_round_rate(ref_clk, (new_clock << i) / 4 * 3);
+ if (freq > new_upper_limit)
continue;
}
@@ -164,7 +181,10 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
}
}
- clk_set_rate(priv->clk, best_freq);
+ clk_set_rate(ref_clk, best_freq);
+
+ if (priv->clkh)
+ clk_set_rate(priv->clk, best_freq >> clkh_shift);
return clk_get_rate(priv->clk);
}
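
A worked example of the 1/1024 upper limit in the loop above, using the rates from the comment (the exact rounded values are illustrative): if new_clock << i works out to 533333332 Hz while clk_round_rate() offers 533333333 Hz, then without the margin freq > new_clock << i and the code falls back to the slower 400 MHz source; with the margin the limit becomes 533333332 + (533333332 >> 10) = 533854165 Hz, so the 533.33 MHz source is accepted.
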
@@ -172,6 +192,7 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
static void renesas_sdhi_set_clock(struct tmio_mmc_host *host,
unsigned int new_clock)
{
+ unsigned int clk_margin;
u32 clk = 0, clock;
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
@@ -185,7 +206,13 @@ static void renesas_sdhi_set_clock(struct tmio_mmc_host *host,
host->mmc->actual_clock = renesas_sdhi_clk_update(host, new_clock);
clock = host->mmc->actual_clock / 512;
- for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
+ /*
+ * Allow a margin of 1/1024 above the requested rate so that the clk
+ * variable does not end up as 0 because of the margin already applied
+ * to actual_clock in renesas_sdhi_clk_update().
+ */
+ clk_margin = new_clock >> 10;
+ for (clk = 0x80000080; new_clock + clk_margin >= (clock << 1); clk >>= 1)
clock <<= 1;
/* 1/1 clock is option */
@@ -196,7 +223,11 @@ static void renesas_sdhi_set_clock(struct tmio_mmc_host *host,
clk &= ~0xff;
}
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
+ clock = clk & CLK_CTL_DIV_MASK;
+ if (clock != CLK_CTL_DIV_MASK)
+ host->mmc->actual_clock /= (1 << (ffs(clock) + 1));
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clock);
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
usleep_range(10000, 11000);
@@ -305,27 +336,6 @@ static int renesas_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
#define SH_MOBILE_SDHI_SCC_TMPPORT_CALIB_CODE_MASK 0x1f
#define SH_MOBILE_SDHI_SCC_TMPPORT_MANUAL_MODE BIT(7)
-static const u8 r8a7796_es13_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
- { 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 15,
- 16, 16, 16, 16, 16, 16, 17, 18, 18, 19, 20, 21, 22, 23, 24, 25 },
- { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 8, 11,
- 12, 17, 18, 18, 18, 18, 18, 18, 18, 19, 20, 21, 22, 23, 25, 25 }
-};
-
-static const u8 r8a77965_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
- { 1, 2, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 15, 16,
- 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 26, 27, 28, 29, 30, 31 },
- { 2, 3, 4, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17,
- 17, 17, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 31, 31, 31, 31 }
-};
-
-static const u8 r8a77990_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 8, 9, 10,
- 11, 12, 13, 15, 16, 17, 17, 18, 18, 19, 20, 22, 24, 25, 26, 26 }
-};
-
static inline u32 sd_scc_read32(struct tmio_mmc_host *host,
struct renesas_sdhi *priv, int addr)
{
@@ -380,7 +390,7 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
struct tmio_mmc_host *host = mmc_priv(mmc);
struct renesas_sdhi *priv = host_to_priv(host);
u32 bad_taps = priv->quirks ? priv->quirks->hs400_bad_taps : 0;
- bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
+ bool use_4tap = sdhi_has_quirk(priv, hs400_4taps);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
@@ -392,8 +402,7 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF,
priv->scc_tappos_hs400);
- /* Gen3 can't do automatic tap correction with HS400, so disable it */
- if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC)
+ if (sdhi_has_quirk(priv, manual_tap_correction))
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
@@ -403,10 +412,10 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) |
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2));
- /* Set the sampling clock selection range of HS400 mode */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
- 0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
+ sd_scc_read32(host, priv,
+ SH_MOBILE_SDHI_SCC_DTCNTL));
/* Avoid bad TAP */
if (bad_taps & BIT(priv->tap_set)) {
@@ -544,7 +553,7 @@ static void renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host,
SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2));
- if (priv->adjust_hs400_calib_table)
+ if (sdhi_has_quirk(priv, hs400_calib_table) || sdhi_has_quirk(priv, hs400_bad_taps))
renesas_sdhi_adjust_hs400_mode_disable(host);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
@@ -571,21 +580,40 @@ static void renesas_sdhi_scc_reset(struct tmio_mmc_host *host, struct renesas_sd
}
/* only populated for TMIO_MMC_MIN_RCAR2 */
-static void renesas_sdhi_reset(struct tmio_mmc_host *host)
+static void renesas_sdhi_reset(struct tmio_mmc_host *host, bool preserve)
{
struct renesas_sdhi *priv = host_to_priv(host);
int ret;
u16 val;
- if (priv->rstc) {
- reset_control_reset(priv->rstc);
- /* Unknown why but without polling reset status, it will hang */
- read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
- false, priv->rstc);
- priv->needs_adjust_hs400 = false;
- renesas_sdhi_set_clock(host, host->clk_cache);
- } else if (priv->scc_ctl) {
- renesas_sdhi_scc_reset(host, priv);
+ if (!preserve) {
+ if (priv->rstc) {
+ u32 sd_status;
+ /*
+ * A HW reset might have toggled the regulator state in
+ * hardware without the regulator core being aware of it, so
+ * save and restore the regulator state across the HW reset.
+ */
+ if (priv->rdev)
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+
+ reset_control_reset(priv->rstc);
+ /* Unknown why but without polling reset status, it will hang */
+ read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
+ false, priv->rstc);
+ /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
+ sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
+ if (priv->rdev)
+ sd_ctrl_write32(host, CTL_SD_STATUS, sd_status);
+
+ priv->needs_adjust_hs400 = false;
+ renesas_sdhi_set_clock(host, host->clk_cache);
+
+ /* Ensure default value for this driver. */
+ renesas_sdhi_sdbuf_width(host, 16);
+ } else if (priv->scc_ctl) {
+ renesas_sdhi_scc_reset(host, priv);
+ }
}
if (sd_ctrl_read16(host, CTL_VERSION) >= SDHI_VER_GEN3_SD) {
@@ -647,7 +675,7 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
* is at least SH_MOBILE_SDHI_MIN_TAP_ROW probes long then use the
* center index as the tap, otherwise bail out.
*/
- bitmap_for_each_set_region(bitmap, rs, re, 0, taps_size) {
+ for_each_set_bitrange(rs, re, bitmap, taps_size) {
if (re - rs > tap_cnt) {
tap_end = re;
tap_start = rs;
@@ -663,9 +691,8 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
/* Set SCC */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, priv->tap_set);
- /* Enable auto re-tuning */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
- SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN |
+ (priv->card_is_sdio ? 0 : SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN) |
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
return 0;
@@ -692,7 +719,7 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)
/* Issue CMD19 twice for each tap */
for (i = 0; i < 2 * priv->tap_num; i++) {
- int cmd_error;
+ int cmd_error = 0;
/* Set sampling clock position */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
@@ -726,7 +753,7 @@ static bool renesas_sdhi_manual_correction(struct tmio_mmc_host *host, bool use_
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
/* Change TAP position according to correction status */
- if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC &&
+ if (sdhi_has_quirk(priv, manual_tap_correction) &&
host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
u32 bad_taps = priv->quirks ? priv->quirks->hs400_bad_taps : 0;
/*
@@ -755,6 +782,14 @@ static bool renesas_sdhi_manual_correction(struct tmio_mmc_host *host, bool use_
if (bad_taps & BIT(new_tap % priv->tap_num))
return test_bit(error_tap % priv->tap_num, priv->smpcmp);
} else {
+ if (!priv->card_is_sdio &&
+ !(val & SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR)) {
+ u32 smpcmp = sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP);
+
+ /* DAT1 is unmatched because of an SDIO irq */
+ if (smpcmp & (BIT(17) | BIT(1)))
+ return false;
+ }
if (val & SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR)
return true; /* need retune */
else if (val & SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPUP)
@@ -790,7 +825,7 @@ static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host,
struct mmc_request *mrq)
{
struct renesas_sdhi *priv = host_to_priv(host);
- bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
+ bool use_4tap = sdhi_has_quirk(priv, hs400_4taps);
bool ret = false;
/*
@@ -805,11 +840,14 @@ static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host,
if (mmc_doing_tune(host->mmc))
return false;
- if (((mrq->cmd->error == -ETIMEDOUT) ||
- (mrq->data && mrq->data->error == -ETIMEDOUT)) &&
- ((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
- (host->ops.get_cd && host->ops.get_cd(host->mmc))))
- ret |= true;
+ /* mrq can be NULL to check SCC error on SDIO irq without any request */
+ if (mrq) {
+ if (((mrq->cmd->error == -ETIMEDOUT) ||
+ (mrq->data && mrq->data->error == -ETIMEDOUT)) &&
+ ((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
+ (host->ops.get_cd && host->ops.get_cd(host->mmc))))
+ ret |= true;
+ }
if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL) &
SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN)
@@ -820,6 +858,28 @@ static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host,
return ret;
}
+static void renesas_sdhi_init_card(struct mmc_host *mmc, struct mmc_card *card)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ /*
+ * This controller cannot do auto-retune with SDIO irqs, so we
+ * need to enforce manual correction instead. However, when tuning,
+ * mmc->card is not populated yet, so we don't know if the card
+ * is SDIO. init_card provides this information earlier, so we
+ * keep a copy of it.
+ */
+ priv->card_is_sdio = mmc_card_sdio(card);
+}
+
+static void renesas_sdhi_sdio_irq(struct tmio_mmc_host *host)
+{
+ /* This controller requires retune when an SDIO irq occurs */
+ if (renesas_sdhi_check_scc_error(host, NULL))
+ mmc_retune_needed(host->mmc);
+}
+
static int renesas_sdhi_wait_idle(struct tmio_mmc_host *host, u32 bit)
{
int timeout = 1000;
@@ -895,83 +955,119 @@ static void renesas_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable)
renesas_sdhi_sdbuf_width(host, enable ? width : 16);
}
-static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400 = {
- .hs400_disabled = true,
- .hs400_4taps = true,
+static const unsigned int renesas_sdhi_vqmmc_voltages[] = {
+ 3300000, 1800000
};
-static const struct renesas_sdhi_quirks sdhi_quirks_4tap = {
- .hs400_4taps = true,
- .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
-};
+static int renesas_sdhi_regulator_disable(struct regulator_dev *rdev)
+{
+ struct tmio_mmc_host *host = rdev_get_drvdata(rdev);
+ u32 sd_status;
-static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
- .hs400_disabled = true,
-};
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+ sd_status &= ~SD_STATUS_PWEN;
+ sd_ctrl_write32(host, CTL_SD_STATUS, sd_status);
-static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps1357 = {
- .hs400_bad_taps = BIT(1) | BIT(3) | BIT(5) | BIT(7),
-};
+ return 0;
+}
-static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = {
- .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
-};
+static int renesas_sdhi_regulator_enable(struct regulator_dev *rdev)
+{
+ struct tmio_mmc_host *host = rdev_get_drvdata(rdev);
+ u32 sd_status;
-static const struct renesas_sdhi_quirks sdhi_quirks_r8a7796_es13 = {
- .hs400_4taps = true,
- .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
- .hs400_calib_table = r8a7796_es13_calib_table,
-};
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+ sd_status |= SD_STATUS_PWEN;
+ sd_ctrl_write32(host, CTL_SD_STATUS, sd_status);
-static const struct renesas_sdhi_quirks sdhi_quirks_r8a77965 = {
- .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
- .hs400_calib_table = r8a77965_calib_table,
-};
+ return 0;
+}
+
+static int renesas_sdhi_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct tmio_mmc_host *host = rdev_get_drvdata(rdev);
+ u32 sd_status;
+
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+
+ return (sd_status & SD_STATUS_PWEN) ? 1 : 0;
+}
+
+static int renesas_sdhi_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct tmio_mmc_host *host = rdev_get_drvdata(rdev);
+ u32 sd_status;
+
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+
+ return (sd_status & SD_STATUS_IOVS) ? 1800000 : 3300000;
+}
+
+static int renesas_sdhi_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ struct tmio_mmc_host *host = rdev_get_drvdata(rdev);
+ u32 sd_status;
-static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = {
- .hs400_calib_table = r8a77990_calib_table,
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+ if (min_uV >= 1700000 && max_uV <= 1950000) {
+ sd_status |= SD_STATUS_IOVS;
+ *selector = 1;
+ } else {
+ sd_status &= ~SD_STATUS_IOVS;
+ *selector = 0;
+ }
+ sd_ctrl_write32(host, CTL_SD_STATUS, sd_status);
+
+ return 0;
+}
+
+static int renesas_sdhi_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (selector >= ARRAY_SIZE(renesas_sdhi_vqmmc_voltages))
+ return -EINVAL;
+
+ return renesas_sdhi_vqmmc_voltages[selector];
+}
+
+static const struct regulator_ops renesas_sdhi_regulator_voltage_ops = {
+ .enable = renesas_sdhi_regulator_enable,
+ .disable = renesas_sdhi_regulator_disable,
+ .is_enabled = renesas_sdhi_regulator_is_enabled,
+ .list_voltage = renesas_sdhi_regulator_list_voltage,
+ .get_voltage = renesas_sdhi_regulator_get_voltage,
+ .set_voltage = renesas_sdhi_regulator_set_voltage,
};
-/*
- * Note for r8a7796 / r8a774a1: we can't distinguish ES1.1 and 1.2 as of now.
- * So, we want to treat them equally and only have a match for ES1.2 to enforce
- * this if there ever will be a way to distinguish ES1.2.
- */
-static const struct soc_device_attribute sdhi_quirks_match[] = {
- { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
- { .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 },
- { .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
- { .soc_id = "r8a7795", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps2367 },
- { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
- { .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
- { .soc_id = "r8a77961", .data = &sdhi_quirks_bad_taps1357 },
- { .soc_id = "r8a77965", .data = &sdhi_quirks_r8a77965 },
- { .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
- { .soc_id = "r8a77990", .data = &sdhi_quirks_r8a77990 },
- { /* Sentinel. */ },
+static const struct regulator_desc renesas_sdhi_vqmmc_regulator = {
+ .name = "sdhi-vqmmc-regulator",
+ .of_match = of_match_ptr("vqmmc-regulator"),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .ops = &renesas_sdhi_regulator_voltage_ops,
+ .volt_table = renesas_sdhi_vqmmc_voltages,
+ .n_voltages = ARRAY_SIZE(renesas_sdhi_vqmmc_voltages),
};
int renesas_sdhi_probe(struct platform_device *pdev,
- const struct tmio_mmc_dma_ops *dma_ops)
+ const struct tmio_mmc_dma_ops *dma_ops,
+ const struct renesas_sdhi_of_data *of_data,
+ const struct renesas_sdhi_quirks *quirks)
{
struct tmio_mmc_data *mmd = pdev->dev.platform_data;
- const struct renesas_sdhi_quirks *quirks = NULL;
- const struct renesas_sdhi_of_data *of_data;
- const struct soc_device_attribute *attr;
struct tmio_mmc_data *mmc_data;
- struct tmio_mmc_dma *dma_priv;
+ struct regulator_config rcfg = { .dev = &pdev->dev, };
+ struct regulator_dev *rdev;
+ struct renesas_sdhi_dma *dma_priv;
+ struct device *dev = &pdev->dev;
struct tmio_mmc_host *host;
struct renesas_sdhi *priv;
int num_irqs, irq, ret, i;
struct resource *res;
u16 ver;
- of_data = of_device_get_match_data(&pdev->dev);
-
- attr = soc_device_match(sdhi_quirks_match);
- if (attr)
- quirks = attr->data;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
@@ -986,11 +1082,12 @@ int renesas_sdhi_probe(struct platform_device *pdev,
dma_priv = &priv->dma_priv;
priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk), "cannot get clock");
+
+ priv->clkh = devm_clk_get_optional(&pdev->dev, "clkh");
+ if (IS_ERR(priv->clkh))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clkh), "cannot get clkh");
/*
* Some controllers provide a 2nd clock just to run the internal card
@@ -1003,9 +1100,13 @@ int renesas_sdhi_probe(struct platform_device *pdev,
* to the card detect circuit. So, if separate clocks are presented,
* we must treat them both as virtually one clock.
*/
- priv->clk_cd = devm_clk_get(&pdev->dev, "cd");
+ priv->clk_cd = devm_clk_get_optional(&pdev->dev, "cd");
if (IS_ERR(priv->clk_cd))
- priv->clk_cd = NULL;
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk_cd), "cannot get cd clock");
+
+ priv->rstc = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev, NULL);
+ if (IS_ERR(priv->rstc))
+ return PTR_ERR(priv->rstc);
priv->pinctrl = devm_pinctrl_get(&pdev->dev);
if (!IS_ERR(priv->pinctrl)) {
@@ -1019,6 +1120,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (IS_ERR(host))
return PTR_ERR(host);
+ priv->host = host;
+
if (of_data) {
mmc_data->flags |= of_data->tmio_flags;
mmc_data->ocr_mask = of_data->tmio_ocr_mask;
@@ -1029,20 +1132,24 @@ int renesas_sdhi_probe(struct platform_device *pdev,
mmc_data->max_segs = of_data->max_segs;
dma_priv->dma_buswidth = of_data->dma_buswidth;
host->bus_shift = of_data->bus_shift;
+ /* Fallback for old DTs */
+ if (!priv->clkh && of_data->sdhi_flags & SDHI_FLAG_NEED_CLKH_FALLBACK)
+ priv->clkh = clk_get_parent(clk_get_parent(priv->clk));
+
}
- host->write16_hook = renesas_sdhi_write16_hook;
- host->clk_enable = renesas_sdhi_clk_enable;
- host->clk_disable = renesas_sdhi_clk_disable;
- host->set_clock = renesas_sdhi_set_clock;
- host->multi_io_quirk = renesas_sdhi_multi_io_quirk;
- host->dma_ops = dma_ops;
+ host->write16_hook = renesas_sdhi_write16_hook;
+ host->clk_enable = renesas_sdhi_clk_enable;
+ host->clk_disable = renesas_sdhi_clk_disable;
+ host->set_clock = renesas_sdhi_set_clock;
+ host->multi_io_quirk = renesas_sdhi_multi_io_quirk;
+ host->dma_ops = dma_ops;
- if (quirks && quirks->hs400_disabled)
+ if (sdhi_has_quirk(priv, hs400_disabled))
host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
/* For some SoC, we disable internal WP. GPIO may override this */
- if (mmc_can_gpio_ro(host->mmc))
+ if (mmc_host_can_gpio_ro(host->mmc))
mmc_data->capabilities2 &= ~MMC_CAP2_NO_WRITE_PROTECT;
/* SDR speeds are only available on Gen2+ */
@@ -1054,6 +1161,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->sdcard_irq_setbit_mask = TMIO_STAT_ALWAYS_SET_27;
host->sdcard_irq_mask_all = TMIO_MASK_ALL_RCAR2;
host->reset = renesas_sdhi_reset;
+ } else {
+ host->sdcard_irq_mask_all = TMIO_MASK_ALL;
}
/* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
@@ -1066,7 +1175,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
dma_priv->filter = shdma_chan_filter;
dma_priv->enable = renesas_sdhi_enable_dma;
- mmc_data->alignment_shift = 1; /* 2-byte alignment */
mmc_data->capabilities |= MMC_CAP_MMC_HIGHSPEED;
/*
@@ -1093,11 +1201,20 @@ int renesas_sdhi_probe(struct platform_device *pdev,
ret = renesas_sdhi_clk_enable(host);
if (ret)
- goto efree;
+ return ret;
- priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
- if (IS_ERR(priv->rstc))
- return PTR_ERR(priv->rstc);
+ rcfg.of_node = of_get_available_child_by_name(dev->of_node, "vqmmc-regulator");
+ if (rcfg.of_node) {
+ rcfg.driver_data = priv->host;
+ rdev = devm_regulator_register(dev, &renesas_sdhi_vqmmc_regulator, &rcfg);
+ of_node_put(rcfg.of_node);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "regulator register failed err=%ld", PTR_ERR(rdev));
+ ret = PTR_ERR(rdev);
+ goto edisclk;
+ }
+ priv->rdev = rdev;
+ }
ver = sd_ctrl_read16(host, CTL_VERSION);
/* GEN2_SDR104 is first known SDHI to use 32bit block count */
@@ -1108,7 +1225,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (ver == SDHI_VER_GEN2_SDR50)
mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
- if (ver == SDHI_VER_GEN3_SDMMC && quirks && quirks->hs400_calib_table) {
+ if (ver == SDHI_VER_GEN3_SDMMC && sdhi_has_quirk(priv, hs400_calib_table)) {
host->fixup_request = renesas_sdhi_fixup_request;
priv->adjust_hs400_calib_table = *(
res->start == SDHI_GEN3_MMC0_ADDR ?
@@ -1120,13 +1237,15 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (ver >= SDHI_VER_GEN3_SD)
host->get_timeout_cycles = renesas_sdhi_gen3_get_cycles;
+ /* Check for SCC so we can reset it if needed */
+ if (of_data && of_data->scc_offset && ver >= SDHI_VER_GEN2_SDR104)
+ priv->scc_ctl = host->ctl + of_data->scc_offset;
+
/* Enable tuning iff we have an SCC and a supported mode */
- if (of_data && of_data->scc_offset &&
- (host->mmc->caps & MMC_CAP_UHS_SDR104 ||
- host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR |
- MMC_CAP2_HS400_1_8V))) {
+ if (priv->scc_ctl && (host->mmc->caps & MMC_CAP_UHS_SDR104 ||
+ host->mmc->caps2 & MMC_CAP2_HSX00_1_8V)) {
const struct renesas_sdhi_scc *taps = of_data->taps;
- bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
+ bool use_4tap = sdhi_has_quirk(priv, hs400_4taps);
bool hit = false;
for (i = 0; i < of_data->taps_num; i++) {
@@ -1144,69 +1263,96 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (!hit)
dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n");
- priv->scc_ctl = host->ctl + of_data->scc_offset;
host->check_retune = renesas_sdhi_check_scc_error;
+ host->sdio_irq = renesas_sdhi_sdio_irq;
+ host->ops.init_card = renesas_sdhi_init_card;
host->ops.execute_tuning = renesas_sdhi_execute_tuning;
host->ops.prepare_hs400_tuning = renesas_sdhi_prepare_hs400_tuning;
host->ops.hs400_downgrade = renesas_sdhi_disable_scc;
host->ops.hs400_complete = renesas_sdhi_hs400_complete;
}
- ret = tmio_mmc_host_probe(host);
- if (ret < 0)
- goto edisclk;
-
- num_irqs = platform_irq_count(pdev);
- if (num_irqs < 0) {
- ret = num_irqs;
- goto eirq;
- }
+ sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
/* There must be at least one IRQ source */
- if (!num_irqs) {
- ret = -ENXIO;
- goto eirq;
+ num_irqs = platform_irq_count(pdev);
+ if (num_irqs <= 0) {
+ ret = num_irqs ?: -ENOENT;
+ goto edisclk;
}
for (i = 0; i < num_irqs; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
ret = irq;
- goto eirq;
+ goto edisclk;
}
ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
- goto eirq;
+ goto edisclk;
}
+ ret = tmio_mmc_host_probe(host);
+ if (ret < 0)
+ goto edisclk;
+
dev_info(&pdev->dev, "%s base at %pa, max clock rate %u MHz\n",
mmc_hostname(host->mmc), &res->start, host->mmc->f_max / 1000000);
return ret;
-eirq:
- tmio_mmc_host_remove(host);
edisclk:
renesas_sdhi_clk_disable(host);
-efree:
- tmio_mmc_host_free(host);
-
return ret;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_probe);
-int renesas_sdhi_remove(struct platform_device *pdev)
+void renesas_sdhi_remove(struct platform_device *pdev)
{
struct tmio_mmc_host *host = platform_get_drvdata(pdev);
tmio_mmc_host_remove(host);
renesas_sdhi_clk_disable(host);
- tmio_mmc_host_free(host);
-
- return 0;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
+int renesas_sdhi_suspend(struct device *dev)
+{
+ struct tmio_mmc_host *host = dev_get_drvdata(dev);
+ struct renesas_sdhi *priv = host_to_priv(host);
+ int ret;
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = reset_control_assert(priv->rstc);
+ if (ret)
+ pm_runtime_force_resume(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(renesas_sdhi_suspend);
+
+int renesas_sdhi_resume(struct device *dev)
+{
+ struct tmio_mmc_host *host = dev_get_drvdata(dev);
+ struct renesas_sdhi *priv = host_to_priv(host);
+ int ret;
+
+ ret = reset_control_deassert(priv->rstc);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ reset_control_assert(priv->rstc);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(renesas_sdhi_resume);
+
+MODULE_DESCRIPTION("Renesas SDHI core driver");
MODULE_LICENSE("GPL v2");
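The hunks above add a vqmmc regulator that is driven entirely through the controller's SD_STATUS register: PWEN gates card power and IOVS selects between the two entries of the volt_table. The stand-alone sketch below models only that selector logic; fake_sd_status and the bit position chosen for SD_STATUS_IOVS are illustrative stand-ins, not the driver's actual register layout.

/*
 * Minimal stand-alone sketch of the selector logic: the IOVS bit in the
 * modeled SD_STATUS register picks the 1.8V entry of the volt_table,
 * otherwise the 3.3V entry is used. fake_sd_status and the bit position
 * of SD_STATUS_IOVS are illustrative, not the real register layout.
 */
#include <stdint.h>
#include <stdio.h>

#define SD_STATUS_IOVS (1u << 16) /* assumed bit position, for illustration */

static const unsigned int vqmmc_voltages[] = { 3300000, 1800000 };

static uint32_t fake_sd_status; /* stands in for CTL_SD_STATUS */

static int set_voltage(int min_uV, int max_uV, unsigned int *selector)
{
	/* the 1.8V window selects entry 1, anything else falls back to 3.3V */
	if (min_uV >= 1700000 && max_uV <= 1950000) {
		fake_sd_status |= SD_STATUS_IOVS;
		*selector = 1;
	} else {
		fake_sd_status &= ~SD_STATUS_IOVS;
		*selector = 0;
	}
	return 0;
}

static unsigned int get_voltage(void)
{
	return vqmmc_voltages[(fake_sd_status & SD_STATUS_IOVS) ? 1 : 0];
}

int main(void)
{
	unsigned int sel;

	set_voltage(1700000, 1950000, &sel);
	printf("selector=%u -> %u uV\n", sel, get_voltage());
	set_voltage(3200000, 3400000, &sel);
	printf("selector=%u -> %u uV\n", sel, get_voltage());
	return 0;
}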
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index e8f4863d8f1a..f6ebb7bc7ede 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -11,11 +11,13 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io-64-nonatomic-hi-lo.h>
-#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pagemap.h>
+#include <linux/platform_data/tmio.h>
+#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/sys_soc.h>
@@ -46,9 +48,9 @@
#define RST_RESERVED_BITS GENMASK_ULL(31, 0)
/* DM_CM_INFO1 and DM_CM_INFO1_MASK */
-#define INFO1_CLEAR 0
#define INFO1_MASK_CLEAR GENMASK_ULL(31, 0)
-#define INFO1_DTRANEND1 BIT(17)
+#define INFO1_DTRANEND1 BIT(20)
+#define INFO1_DTRANEND1_OLD BIT(17)
#define INFO1_DTRANEND0 BIT(16)
/* DM_CM_INFO2 and DM_CM_INFO2_MASK */
@@ -71,17 +73,12 @@ enum renesas_sdhi_dma_cookie {
static unsigned long global_flags;
/*
- * Workaround for avoiding to use RX DMAC by multiple channels.
- * On R-Car H3 ES1.* and M3-W ES1.0, when multiple SDHI channels use
- * RX DMAC simultaneously, sometimes hundreds of bytes data are not
- * stored into the system memory even if the DMAC interrupt happened.
- * So, this driver then uses one RX DMAC channel only.
+ * Workaround for avoiding to use RX DMAC by multiple channels. On R-Car M3-W
+ * ES1.0, when multiple SDHI channels use RX DMAC simultaneously, sometimes
+ * hundreds of data bytes are not stored into the system memory even if the
+ * DMAC interrupt happened. So, this driver then uses one RX DMAC channel only.
*/
-#define SDHI_INTERNAL_DMAC_ONE_RX_ONLY 0
-#define SDHI_INTERNAL_DMAC_RX_IN_USE 1
-
-/* RZ/A2 does not have the ADRR_MODE bit */
-#define SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY 2
+#define SDHI_INTERNAL_DMAC_RX_IN_USE 0
/* Definitions for sampling clocks */
static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
@@ -92,7 +89,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
},
};
-static const struct renesas_sdhi_of_data of_rza2_compatible = {
+static const struct renesas_sdhi_of_data of_data_rza2 = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
TMIO_MMC_HAVE_CBSY,
.tmio_ocr_mask = MMC_VDD_32_33,
@@ -107,9 +104,27 @@ static const struct renesas_sdhi_of_data of_rza2_compatible = {
.max_segs = 1,
};
-static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
+static const struct renesas_sdhi_of_data of_data_rcar_gen3 = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
+ TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2 |
+ TMIO_MMC_64BIT_DATA_PORT,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+ MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
+ .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT | MMC_CAP2_MERGE_CAPABLE,
+ .bus_shift = 2,
+ .scc_offset = 0x1000,
+ .taps = rcar_gen3_scc_taps,
+ .taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
+ /* DMAC can handle 32bit blk count but only 1 segment */
+ .max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
+ .max_segs = 1,
+ .sdhi_flags = SDHI_FLAG_NEED_CLKH_FALLBACK,
+};
+
+static const struct renesas_sdhi_of_data of_data_rcar_gen3_no_sdh_fallback = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
- TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
+ TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2 |
+ TMIO_MMC_64BIT_DATA_PORT,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
.capabilities2 = MMC_CAP2_NO_WRITE_PROTECT | MMC_CAP2_MERGE_CAPABLE,
@@ -122,64 +137,229 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
.max_segs = 1,
};
+static const u8 r8a7796_es13_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
+ { 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 15,
+ 16, 16, 16, 16, 16, 16, 17, 18, 18, 19, 20, 21, 22, 23, 24, 25 },
+ { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 8, 11,
+ 12, 17, 18, 18, 18, 18, 18, 18, 18, 19, 20, 21, 22, 23, 25, 25 }
+};
+
+static const u8 r8a77965_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
+ { 1, 2, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 26, 27, 28, 29, 30, 31 },
+ { 2, 3, 4, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 17, 17, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 31, 31, 31, 31 }
+};
+
+static const u8 r8a77990_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 8, 9, 10,
+ 11, 12, 13, 15, 16, 17, 17, 18, 18, 19, 20, 22, 24, 25, 26, 26 }
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400 = {
+ .hs400_disabled = true,
+ .hs400_4taps = true,
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400_one_rx = {
+ .hs400_disabled = true,
+ .hs400_4taps = true,
+ .dma_one_rx_only = true,
+ .old_info1_layout = true,
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_4tap = {
+ .hs400_4taps = true,
+ .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+ .manual_tap_correction = true,
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
+ .hs400_disabled = true,
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_fixed_addr = {
+ .fixed_addr_mode = true,
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps1357 = {
+ .hs400_bad_taps = BIT(1) | BIT(3) | BIT(5) | BIT(7),
+ .manual_tap_correction = true,
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = {
+ .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+ .manual_tap_correction = true,
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_r8a7796_es13 = {
+ .hs400_4taps = true,
+ .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+ .hs400_calib_table = r8a7796_es13_calib_table,
+ .manual_tap_correction = true,
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_r8a77965 = {
+ .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+ .hs400_calib_table = r8a77965_calib_table,
+ .manual_tap_correction = true,
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = {
+ .hs400_calib_table = r8a77990_calib_table,
+ .manual_tap_correction = true,
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_rzg2l = {
+ .fixed_addr_mode = true,
+ .hs400_disabled = true,
+};
+
+/*
+ * Note for r8a7796 / r8a774a1: we can't distinguish ES1.1 and 1.2 as of now.
+ * So, we want to treat them equally and only have a match for ES1.2 to enforce
+ * this if there ever will be a way to distinguish ES1.2.
+ */
+static const struct soc_device_attribute sdhi_quirks_match[] = {
+ { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
+ { .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
+ { .soc_id = "r8a7796", .revision = "ES1.0", .data = &sdhi_quirks_4tap_nohs400_one_rx },
+ { .soc_id = "r8a7796", .revision = "ES1.[12]", .data = &sdhi_quirks_4tap_nohs400 },
+ { .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
+ { .soc_id = "r8a77980", .revision = "ES1.*", .data = &sdhi_quirks_nohs400 },
+ { /* Sentinel. */ }
+};
+
+static const struct renesas_sdhi_of_data_with_quirks of_r8a7795_compatible = {
+ .of_data = &of_data_rcar_gen3,
+ .quirks = &sdhi_quirks_bad_taps2367,
+};
+
+static const struct renesas_sdhi_of_data_with_quirks of_r8a77961_compatible = {
+ .of_data = &of_data_rcar_gen3,
+ .quirks = &sdhi_quirks_bad_taps1357,
+};
+
+static const struct renesas_sdhi_of_data_with_quirks of_r8a77965_compatible = {
+ .of_data = &of_data_rcar_gen3,
+ .quirks = &sdhi_quirks_r8a77965,
+};
+
+static const struct renesas_sdhi_of_data_with_quirks of_r8a77970_compatible = {
+ .of_data = &of_data_rcar_gen3_no_sdh_fallback,
+ .quirks = &sdhi_quirks_nohs400,
+};
+
+static const struct renesas_sdhi_of_data_with_quirks of_r8a77990_compatible = {
+ .of_data = &of_data_rcar_gen3,
+ .quirks = &sdhi_quirks_r8a77990,
+};
+
+static const struct renesas_sdhi_of_data_with_quirks of_rzg2l_compatible = {
+ .of_data = &of_data_rcar_gen3,
+ .quirks = &sdhi_quirks_rzg2l,
+};
+
+static const struct renesas_sdhi_of_data_with_quirks of_rcar_gen3_compatible = {
+ .of_data = &of_data_rcar_gen3,
+};
+
+static const struct renesas_sdhi_of_data_with_quirks of_rcar_gen3_nohs400_compatible = {
+ .of_data = &of_data_rcar_gen3,
+ .quirks = &sdhi_quirks_nohs400,
+};
+
+static const struct renesas_sdhi_of_data_with_quirks of_rza2_compatible = {
+ .of_data = &of_data_rza2,
+ .quirks = &sdhi_quirks_fixed_addr,
+};
+
static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r7s9210", .data = &of_rza2_compatible, },
{ .compatible = "renesas,sdhi-mmc-r8a77470", .data = &of_rcar_gen3_compatible, },
- { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
- { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,sdhi-r8a7795", .data = &of_r8a7795_compatible, },
+ { .compatible = "renesas,sdhi-r8a77961", .data = &of_r8a77961_compatible, },
+ { .compatible = "renesas,sdhi-r8a77965", .data = &of_r8a77965_compatible, },
+ { .compatible = "renesas,sdhi-r8a77970", .data = &of_r8a77970_compatible, },
+ { .compatible = "renesas,sdhi-r8a77990", .data = &of_r8a77990_compatible, },
+ { .compatible = "renesas,sdhi-r8a77995", .data = &of_rcar_gen3_nohs400_compatible, },
+ { .compatible = "renesas,sdhi-r9a09g011", .data = &of_rzg2l_compatible, },
+ { .compatible = "renesas,sdhi-r9a09g057", .data = &of_rzg2l_compatible, },
+ { .compatible = "renesas,rzg2l-sdhi", .data = &of_rzg2l_compatible, },
{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,rcar-gen4-sdhi", .data = &of_rcar_gen3_compatible, },
{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match);
static void
-renesas_sdhi_internal_dmac_dm_write(struct tmio_mmc_host *host,
- int addr, u64 val)
-{
- writeq(val, host->ctl + addr);
-}
-
-static void
renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable)
{
struct renesas_sdhi *priv = host_to_priv(host);
+ u32 dma_irqs = INFO1_DTRANEND0 |
+ (sdhi_has_quirk(priv, old_info1_layout) ?
+ INFO1_DTRANEND1_OLD : INFO1_DTRANEND1);
if (!host->chan_tx || !host->chan_rx)
return;
- if (!enable)
- renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1,
- INFO1_CLEAR);
+ writel(enable ? ~dma_irqs : INFO1_MASK_CLEAR, host->ctl + DM_CM_INFO1_MASK);
if (priv->dma_priv.enable)
priv->dma_priv.enable(host, enable);
}
static void
-renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
+renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host)
+{
u64 val = RST_DTRANRST1 | RST_DTRANRST0;
renesas_sdhi_internal_dmac_enable_dma(host, false);
- renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST,
- RST_RESERVED_BITS & ~val);
- renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST,
- RST_RESERVED_BITS | val);
+ writel(RST_RESERVED_BITS & ~val, host->ctl + DM_CM_RST);
+ writel(RST_RESERVED_BITS | val, host->ctl + DM_CM_RST);
clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
renesas_sdhi_internal_dmac_enable_dma(host, true);
}
+static bool renesas_sdhi_internal_dmac_dma_irq(struct tmio_mmc_host *host)
+{
+ struct renesas_sdhi *priv = host_to_priv(host);
+ struct renesas_sdhi_dma *dma_priv = &priv->dma_priv;
+
+ u32 dma_irqs = INFO1_DTRANEND0 |
+ (sdhi_has_quirk(priv, old_info1_layout) ?
+ INFO1_DTRANEND1_OLD : INFO1_DTRANEND1);
+ u32 status = readl(host->ctl + DM_CM_INFO1);
+
+ if (status & dma_irqs) {
+ writel(status ^ dma_irqs, host->ctl + DM_CM_INFO1);
+ set_bit(SDHI_DMA_END_FLAG_DMA, &dma_priv->end_flags);
+ if (test_bit(SDHI_DMA_END_FLAG_ACCESS, &dma_priv->end_flags))
+ queue_work(system_bh_wq, &dma_priv->dma_complete);
+ }
+
+ return status & dma_irqs;
+}
+
static void
-renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host) {
+renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host)
+{
struct renesas_sdhi *priv = host_to_priv(host);
+ struct renesas_sdhi_dma *dma_priv = &priv->dma_priv;
- tasklet_schedule(&priv->dma_priv.dma_complete);
+ set_bit(SDHI_DMA_END_FLAG_ACCESS, &dma_priv->end_flags);
+ if (test_bit(SDHI_DMA_END_FLAG_DMA, &dma_priv->end_flags) ||
+ host->data->error)
+ queue_work(system_bh_wq, &dma_priv->dma_complete);
}
/*
- * renesas_sdhi_internal_dmac_map() will be called with two difference
+ * renesas_sdhi_internal_dmac_map() will be called with two different
* sg pointers in two mmc_data by .pre_req(), but tmio host can have a single
* sg_ptr only. So, renesas_sdhi_internal_dmac_{un}map() should use a sg
* pointer in a mmc_data instead of host->sg_ptr.
@@ -213,7 +393,7 @@ renesas_sdhi_internal_dmac_map(struct tmio_mmc_host *host,
data->host_cookie = cookie;
- /* This DMAC cannot handle if buffer is not 128-bytes alignment */
+ /* This DMAC needs buffers to be 128-byte aligned */
if (!IS_ALIGNED(sg_dma_address(data->sg), 128)) {
renesas_sdhi_internal_dmac_unmap(host, data, cookie);
return false;
@@ -226,10 +406,11 @@ static void
renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
struct scatterlist *sg = host->sg_ptr;
u32 dtran_mode = DTRAN_MODE_BUS_WIDTH;
- if (!test_bit(SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY, &global_flags))
+ if (!sdhi_has_quirk(priv, fixed_addr_mode))
dtran_mode |= DTRAN_MODE_ADDR_MODE;
if (!renesas_sdhi_internal_dmac_map(host, data, COOKIE_MAPPED))
@@ -237,20 +418,19 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
if (data->flags & MMC_DATA_READ) {
dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
- if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) &&
+ if (sdhi_has_quirk(priv, dma_one_rx_only) &&
test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags))
goto force_pio_with_unmap;
} else {
dtran_mode |= DTRAN_MODE_CH_NUM_CH0;
}
+ priv->dma_priv.end_flags = 0;
renesas_sdhi_internal_dmac_enable_dma(host, true);
/* set dma parameters */
- renesas_sdhi_internal_dmac_dm_write(host, DM_CM_DTRAN_MODE,
- dtran_mode);
- renesas_sdhi_internal_dmac_dm_write(host, DM_DTRAN_ADDR,
- sg_dma_address(sg));
+ writel(dtran_mode, host->ctl + DM_CM_DTRAN_MODE);
+ writel(sg_dma_address(sg), host->ctl + DM_DTRAN_ADDR);
host->dma_on = true;
@@ -263,15 +443,22 @@ force_pio:
renesas_sdhi_internal_dmac_enable_dma(host, false);
}
-static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
+static void renesas_sdhi_internal_dmac_issue_work_fn(struct work_struct *work)
{
- struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+ struct tmio_mmc_host *host = from_work(host, work, dma_issue);
+ struct renesas_sdhi *priv = host_to_priv(host);
tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
- /* start the DMAC */
- renesas_sdhi_internal_dmac_dm_write(host, DM_CM_DTRAN_CTRL,
- DTRAN_CTRL_DM_START);
+ if (!host->cmd->error) {
+ /* start the DMAC */
+ writel(DTRAN_CTRL_DM_START, host->ctl + DM_CM_DTRAN_CTRL);
+ } else {
+ /* on CMD errors, simulate DMA end immediately */
+ set_bit(SDHI_DMA_END_FLAG_DMA, &priv->dma_priv.end_flags);
+ if (test_bit(SDHI_DMA_END_FLAG_ACCESS, &priv->dma_priv.end_flags))
+ queue_work(system_bh_wq, &priv->dma_priv.dma_complete);
+ }
}
static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host)
@@ -300,9 +487,11 @@ static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host)
return true;
}
-static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
+static void renesas_sdhi_internal_dmac_complete_work_fn(struct work_struct *work)
{
- struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+ struct renesas_sdhi_dma *dma_priv = from_work(dma_priv, work, dma_complete);
+ struct renesas_sdhi *priv = container_of(dma_priv, typeof(*priv), dma_priv);
+ struct tmio_mmc_host *host = priv->host;
spin_lock_irq(&host->lock);
if (!renesas_sdhi_internal_dmac_complete(host))
@@ -351,21 +540,19 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
{
struct renesas_sdhi *priv = host_to_priv(host);
- /* Disable DMAC interrupts, we don't use them */
- renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK,
- INFO1_MASK_CLEAR);
- renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK,
- INFO2_MASK_CLEAR);
+ /* Disable DMAC interrupts initially */
+ writel(INFO1_MASK_CLEAR, host->ctl + DM_CM_INFO1_MASK);
+ writel(INFO2_MASK_CLEAR, host->ctl + DM_CM_INFO2_MASK);
+ writel(0, host->ctl + DM_CM_INFO1);
+ writel(0, host->ctl + DM_CM_INFO2);
/* Each value is set to non-zero to assume "enabling" each DMA */
host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;
- tasklet_init(&priv->dma_priv.dma_complete,
- renesas_sdhi_internal_dmac_complete_tasklet_fn,
- (unsigned long)host);
- tasklet_init(&host->dma_issue,
- renesas_sdhi_internal_dmac_issue_tasklet_fn,
- (unsigned long)host);
+ INIT_WORK(&priv->dma_priv.dma_complete,
+ renesas_sdhi_internal_dmac_complete_work_fn);
+ INIT_WORK(&host->dma_issue,
+ renesas_sdhi_internal_dmac_issue_work_fn);
/* Add pre_req and post_req */
host->ops.pre_req = renesas_sdhi_internal_dmac_pre_req;
@@ -387,49 +574,42 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = {
.abort = renesas_sdhi_internal_dmac_abort_dma,
.dataend = renesas_sdhi_internal_dmac_dataend_dma,
.end = renesas_sdhi_internal_dmac_end_dma,
-};
-
-/*
- * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC
- * implementation as others may use a different implementation.
- */
-static const struct soc_device_attribute soc_dma_quirks[] = {
- { .soc_id = "r7s9210",
- .data = (void *)BIT(SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY) },
- { .soc_id = "r8a7795", .revision = "ES1.*",
- .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
- { .soc_id = "r8a7796", .revision = "ES1.0",
- .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
- { /* sentinel */ }
+ .dma_irq = renesas_sdhi_internal_dmac_dma_irq,
};
static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
{
- const struct soc_device_attribute *soc = soc_device_match(soc_dma_quirks);
+ const struct soc_device_attribute *attr;
+ const struct renesas_sdhi_of_data_with_quirks *of_data_quirks;
+ const struct renesas_sdhi_quirks *quirks;
struct device *dev = &pdev->dev;
- if (soc)
- global_flags |= (unsigned long)soc->data;
+ of_data_quirks = of_device_get_match_data(&pdev->dev);
+ quirks = of_data_quirks->quirks;
+
+ attr = soc_device_match(sdhi_quirks_match);
+ if (attr)
+ quirks = attr->data;
/* value is max of SD_SECCNT. Confirmed by HW engineers */
dma_set_max_seg_size(dev, 0xffffffff);
- return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops);
+ return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops,
+ of_data_quirks->of_data, quirks);
}
static const struct dev_pm_ops renesas_sdhi_internal_dmac_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
- tmio_mmc_host_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(renesas_sdhi_suspend, renesas_sdhi_resume)
+ RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
+ tmio_mmc_host_runtime_resume,
+ NULL)
};
static struct platform_driver renesas_internal_dmac_sdhi_driver = {
.driver = {
.name = "renesas_sdhi_internal_dmac",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &renesas_sdhi_internal_dmac_dev_pm_ops,
+ .pm = pm_ptr(&renesas_sdhi_internal_dmac_dev_pm_ops),
.of_match_table = renesas_sdhi_internal_dmac_of_match,
},
.probe = renesas_sdhi_internal_dmac_probe,
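The reworked internal-DMAC completion path above replaces the tasklet with a BH workqueue item that may only run once both the DMAC transfer-end interrupt and the SDHI access-end callback have been seen, with a command error standing in for the missing DMAC event. The plain-C model below illustrates that handshake; the END_FLAG_* names and the completion counter are stand-ins, not the driver's end_flags bits or queue_work() usage.

/*
 * Each side sets its flag, and whichever side sees both flags set "queues"
 * the completion exactly once. A command error substitutes for the missing
 * DMAC event.
 */
#include <stdbool.h>
#include <stdio.h>

enum { END_FLAG_DMA = 1 << 0, END_FLAG_ACCESS = 1 << 1 };

static unsigned int end_flags;
static int completions;

static void maybe_complete(void)
{
	if ((end_flags & (END_FLAG_DMA | END_FLAG_ACCESS)) ==
	    (END_FLAG_DMA | END_FLAG_ACCESS))
		completions++; /* the driver would queue dma_complete here */
}

static void dma_transfer_end_irq(void)
{
	end_flags |= END_FLAG_DMA;
	maybe_complete();
}

static void access_end(bool cmd_error)
{
	end_flags |= END_FLAG_ACCESS;
	if (cmd_error)
		end_flags |= END_FLAG_DMA; /* simulate DMA end on CMD errors */
	maybe_complete();
}

int main(void)
{
	dma_transfer_end_irq(); /* DMAC finishes first, nothing happens yet */
	access_end(false);      /* access end arrives, completion runs once */
	printf("completions=%d\n", completions);
	return 0;
}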
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index ffa64211f4de..543ad1d0ed1c 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -11,12 +11,14 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pagemap.h>
+#include <linux/platform_data/tmio.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/sys_soc.h>
@@ -58,7 +60,8 @@ static struct renesas_sdhi_scc rcar_gen2_scc_taps[] = {
static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
- TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
+ TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2 |
+ TMIO_MMC_32BIT_DATA_PORT,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
.capabilities2 = MMC_CAP2_NO_WRITE_PROTECT,
@@ -108,9 +111,9 @@ static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
renesas_sdhi_sys_dmac_enable_dma(host, false);
if (host->chan_rx)
- dmaengine_terminate_all(host->chan_rx);
+ dmaengine_terminate_sync(host->chan_rx);
if (host->chan_tx)
- dmaengine_terminate_all(host->chan_tx);
+ dmaengine_terminate_sync(host->chan_tx);
renesas_sdhi_sys_dmac_enable_dma(host, true);
}
@@ -160,7 +163,7 @@ static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
- unsigned int align = (1 << host->pdata->alignment_shift) - 1;
+ unsigned int align = 1; /* 2-byte alignment */
for_each_sg(sg, sg_tmp, host->sg_len, i) {
if (sg_tmp->offset & align)
@@ -232,7 +235,7 @@ static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
- unsigned int align = (1 << host->pdata->alignment_shift) - 1;
+ unsigned int align = 1; /* 2-byte alignment */
for_each_sg(sg, sg_tmp, host->sg_len, i) {
if (sg_tmp->offset & align)
@@ -254,12 +257,11 @@ static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
/* The only sg element can be unaligned, use our bounce buffer then */
if (!aligned) {
- unsigned long flags;
- void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
+ void *sg_vaddr = kmap_local_page(sg_page(sg));
sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
- memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
- tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
+ memcpy(host->bounce_buf, sg_vaddr + sg->offset, host->bounce_sg.length);
+ kunmap_local(sg_vaddr);
host->sg_ptr = &host->bounce_sg;
sg = host->sg_ptr;
}
@@ -312,9 +314,9 @@ static void renesas_sdhi_sys_dmac_start_dma(struct tmio_mmc_host *host,
}
}
-static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
+static void renesas_sdhi_sys_dmac_issue_work_fn(struct work_struct *work)
{
- struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
+ struct tmio_mmc_host *host = from_work(host, work, dma_issue);
struct dma_chan *chan = NULL;
spin_lock_irq(&host->lock);
@@ -401,9 +403,8 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
goto ebouncebuf;
init_completion(&priv->dma_priv.dma_dataend);
- tasklet_init(&host->dma_issue,
- renesas_sdhi_sys_dmac_issue_tasklet_fn,
- (unsigned long)host);
+ INIT_WORK(&host->dma_issue,
+ renesas_sdhi_sys_dmac_issue_work_fn);
}
renesas_sdhi_sys_dmac_enable_dma(host, true);
@@ -451,7 +452,8 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = {
static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
{
- return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops);
+ return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops,
+ of_device_get_match_data(&pdev->dev), NULL);
}
static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = {
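The SYS-DMAC hunks above drop the per-platform alignment_shift and hard-code a 2-byte alignment check on every scatterlist element before deciding between direct DMA and the bounce buffer. A small stand-alone sketch of that test follows; struct fake_sg and sg_list_is_aligned() are illustrative, not kernel API.

/*
 * Any scatterlist element with an odd offset or odd length forces the
 * bounce-buffer path; otherwise the list can be handed to the DMAC as-is.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_sg {
	size_t offset;
	size_t length;
};

static bool sg_list_is_aligned(const struct fake_sg *sg, int nents)
{
	const size_t align = 1; /* (2-byte alignment) - 1, as in the hunks above */
	int i;

	for (i = 0; i < nents; i++) {
		if ((sg[i].offset & align) || (sg[i].length & align))
			return false; /* driver would copy via the bounce buffer */
	}
	return true;
}

int main(void)
{
	const struct fake_sg ok[]  = { { 0, 512 }, { 512, 512 } };
	const struct fake_sg bad[] = { { 3, 512 } };

	printf("aligned: %d, unaligned: %d\n",
	       sg_list_is_aligned(ok, 2), sg_list_is_aligned(bad, 1));
	return 0;
}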
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 4ca937415734..dc2587ff8519 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -7,6 +7,7 @@
* Wei WANG <wei_wang@realsil.com.cn>
*/
+#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/highmem.h>
@@ -19,7 +20,7 @@
#include <linux/mmc/sdio.h>
#include <linux/mmc/card.h>
#include <linux/rtsx_pci.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/pm_runtime.h>
struct realtek_pci_sdmmc {
@@ -38,10 +39,7 @@ struct realtek_pci_sdmmc {
bool double_clk;
bool eject;
bool initial_mode;
- int power_state;
-#define SDMMC_POWER_ON 1
-#define SDMMC_POWER_OFF 0
-
+ int prev_power_state;
int sg_count;
s32 cookie;
int cookie_sg_count;
@@ -117,8 +115,6 @@ static int sd_response_type(struct mmc_command *cmd)
return SD_RSP_TYPE_R0;
case MMC_RSP_R1:
return SD_RSP_TYPE_R1;
- case MMC_RSP_R1_NO_CRC:
- return SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
case MMC_RSP_R1B:
return SD_RSP_TYPE_R1b;
case MMC_RSP_R2:
@@ -542,9 +538,22 @@ static int sd_write_long_data(struct realtek_pci_sdmmc *host,
return 0;
}
+static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
+{
+ rtsx_pci_write_register(host->pcr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
+}
+
+static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
+{
+ rtsx_pci_write_register(host->pcr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
+}
+
static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
{
struct mmc_data *data = mrq->data;
+ int err;
if (host->sg_count < 0) {
data->error = host->sg_count;
@@ -553,22 +562,19 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
return data->error;
}
- if (data->flags & MMC_DATA_READ)
- return sd_read_long_data(host, mrq);
+ if (data->flags & MMC_DATA_READ) {
+ if (host->initial_mode)
+ sd_disable_initial_mode(host);
- return sd_write_long_data(host, mrq);
-}
+ err = sd_read_long_data(host, mrq);
-static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
-{
- rtsx_pci_write_register(host->pcr, SD_CFG1,
- SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
-}
+ if (host->initial_mode)
+ sd_enable_initial_mode(host);
-static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
-{
- rtsx_pci_write_register(host->pcr, SD_CFG1,
- SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
+ return err;
+ }
+
+ return sd_write_long_data(host, mrq);
}
static void sd_normal_rw(struct realtek_pci_sdmmc *host,
@@ -895,7 +901,7 @@ static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
return err;
}
-static int sd_power_on(struct realtek_pci_sdmmc *host)
+static int sd_power_on(struct realtek_pci_sdmmc *host, unsigned char power_mode)
{
struct rtsx_pcr *pcr = host->pcr;
struct mmc_host *mmc = host->mmc;
@@ -903,9 +909,14 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
u32 val;
u8 test_mode;
- if (host->power_state == SDMMC_POWER_ON)
+ if (host->prev_power_state == MMC_POWER_ON)
return 0;
+ if (host->prev_power_state == MMC_POWER_UP) {
+ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, 0);
+ goto finish;
+ }
+
msleep(100);
rtsx_pci_init_cmd(pcr);
@@ -926,11 +937,16 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
if (err < 0)
return err;
+ mdelay(1);
+
err = rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
if (err < 0)
return err;
- if (PCI_PID(pcr) == PID_5261) {
+ /* send at least 74 clocks */
+ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN);
+
+ if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
/*
* If test mode is set switch to SD Express mandatorily,
* this is only for factory testing.
@@ -954,7 +970,8 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
}
}
- host->power_state = SDMMC_POWER_ON;
+finish:
+ host->prev_power_state = power_mode;
return 0;
}
@@ -963,7 +980,7 @@ static int sd_power_off(struct realtek_pci_sdmmc *host)
struct rtsx_pcr *pcr = host->pcr;
int err;
- host->power_state = SDMMC_POWER_OFF;
+ host->prev_power_state = MMC_POWER_OFF;
rtsx_pci_init_cmd(pcr);
@@ -989,7 +1006,7 @@ static int sd_set_power_mode(struct realtek_pci_sdmmc *host,
if (power_mode == MMC_POWER_OFF)
err = sd_power_off(host);
else
- err = sd_power_on(host);
+ err = sd_power_on(host, power_mode);
return err;
}
@@ -1346,6 +1363,14 @@ static int sdmmc_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios)
struct realtek_pci_sdmmc *host = mmc_priv(mmc);
struct rtsx_pcr *pcr = host->pcr;
+ if (PCI_PID(pcr) == PID_5264) {
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS, PCI_EXP_LNKCTL2_TLS_2_5GT);
+ pci_write_config_byte(pcr->pci, 0x80e, 0x02);
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS, PCI_EXP_LNKCTL2_TLS_5_0GT);
+ }
+
/* Set relink_time for changing to PCIe card */
relink_time = 0x8FFF;
@@ -1361,6 +1386,12 @@ static int sdmmc_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios)
if (pcr->ops->disable_auto_blink)
pcr->ops->disable_auto_blink(pcr);
+ if (PCI_PID(pcr) == PID_5264) {
+ rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG2,
+ RTS5264_CHIP_RST_N_SEL, RTS5264_CHIP_RST_N_SEL);
+ rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
+ }
+
/* For PCIe/NVMe mode can't enter delink issue */
pcr->hw_param.interrupt_en &= ~(SD_INT_EN);
rtsx_pci_writel(pcr, RTSX_BIER, pcr->hw_param.interrupt_en);
@@ -1456,6 +1487,7 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
struct realtek_pci_sdmmc *host;
struct rtsx_pcr *pcr;
struct pcr_handle *handle = pdev->dev.platform_data;
+ int ret;
if (!handle)
return -ENXIO;
@@ -1466,16 +1498,17 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
dev_dbg(&(pdev->dev), ": Realtek PCI-E SDMMC controller found\n");
- mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host));
if (!mmc)
return -ENOMEM;
host = mmc_priv(mmc);
host->pcr = pcr;
+ mmc->ios.power_delay_ms = 5;
host->mmc = mmc;
host->pdev = pdev;
host->cookie = -1;
- host->power_state = SDMMC_POWER_OFF;
+ host->prev_power_state = MMC_POWER_OFF;
INIT_WORK(&host->work, sd_request);
platform_set_drvdata(pdev, host);
pcr->slots[RTSX_SD_CARD].p_dev = pdev;
@@ -1485,37 +1518,34 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
realtek_init_host(host);
- if (pcr->rtd3_en) {
- pm_runtime_set_autosuspend_delay(&pdev->dev, 5000);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- }
-
+ pm_runtime_no_callbacks(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 200);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_use_autosuspend(&pdev->dev);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret) {
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
return 0;
}
-static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
+static void rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
{
struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
struct rtsx_pcr *pcr;
struct mmc_host *mmc;
- if (!host)
- return 0;
-
pcr = host->pcr;
pcr->slots[RTSX_SD_CARD].p_dev = NULL;
pcr->slots[RTSX_SD_CARD].card_event = NULL;
mmc = host->mmc;
- if (pcr->rtd3_en) {
- pm_runtime_dont_use_autosuspend(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- }
-
cancel_work_sync(&host->work);
mutex_lock(&host->host_mutex);
@@ -1538,12 +1568,11 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
flush_work(&host->work);
- mmc_free_host(mmc);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
dev_dbg(&(pdev->dev),
": Realtek PCI-E SDMMC controller has been removed\n");
-
- return 0;
}
static const struct platform_device_id rtsx_pci_sdmmc_ids[] = {
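The rtsx_pci_sdmmc changes above track the previous MMC power mode instead of a simple on/off flag, so that MMC_POWER_UP (rail up, clock toggled for at least 74 cycles) and MMC_POWER_ON (toggling stopped) can be sequenced separately and duplicate requests become no-ops. The simplified model below illustrates that sequencing; the enum values and the printf "actions" are stand-ins for the rtsx register writes, not the real API.

/*
 * POWER_UP raises the rail and starts clock toggling, POWER_ON stops the
 * toggling, and a repeated request for the current state does nothing.
 */
#include <stdio.h>

enum power_mode { POWER_OFF, POWER_UP, POWER_ON };

static enum power_mode prev_state = POWER_OFF;

static void set_power_mode(enum power_mode mode)
{
	if (mode == prev_state)
		return; /* e.g. a second MMC_POWER_ON request is ignored */

	switch (mode) {
	case POWER_UP:
		printf("enable card power rail, start clock toggling (>= 74 clks)\n");
		break;
	case POWER_ON:
		printf("stop clock toggling, begin normal operation\n");
		break;
	case POWER_OFF:
		printf("disable output and card power rail\n");
		break;
	}
	prev_state = mode;
}

int main(void)
{
	set_power_mode(POWER_UP);
	set_power_mode(POWER_ON);
	set_power_mode(POWER_ON); /* duplicate request: no-op */
	set_power_mode(POWER_OFF);
	return 0;
}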
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 5fe4528e296e..84674659a84d 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -21,7 +21,7 @@
#include <linux/pm_runtime.h>
#include <linux/rtsx_usb.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
defined(CONFIG_MMC_REALTEK_USB_MODULE))
@@ -48,7 +48,7 @@ struct rtsx_usb_sdmmc {
bool ddr_mode;
unsigned char power_mode;
-
+ u16 ocp_stat;
#ifdef RTSX_USB_USE_LEDS_CLASS
struct led_classdev led;
char led_name[32];
@@ -313,9 +313,6 @@ static void sd_send_cmd_get_rsp(struct rtsx_usb_sdmmc *host,
case MMC_RSP_R1:
rsp_type = SD_RSP_TYPE_R1;
break;
- case MMC_RSP_R1_NO_CRC:
- rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
- break;
case MMC_RSP_R1B:
rsp_type = SD_RSP_TYPE_R1b;
break;
@@ -792,12 +789,20 @@ static int sdmmc_get_cd(struct mmc_host *mmc)
if (err)
goto no_card;
+ /* get OCP status */
+ host->ocp_stat = (val >> 4) & 0x03;
+
if (val & SD_CD) {
host->card_exist = true;
return 1;
}
no_card:
+ /* clear OCP status */
+ if (host->ocp_stat & (MS_OCP_NOW | MS_OCP_EVER)) {
+ rtsx_usb_write_register(ucr, OCPCTL, MS_OCP_CLEAR, MS_OCP_CLEAR);
+ host->ocp_stat = 0;
+ }
host->card_exist = false;
return 0;
}
@@ -821,7 +826,11 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
cmd->error = -ENOMEDIUM;
goto finish_detect_card;
}
-
+ /* check OCP status */
+ if (host->ocp_stat & (MS_OCP_NOW | MS_OCP_EVER)) {
+ cmd->error = -ENOMEDIUM;
+ goto finish_detect_card;
+ }
mutex_lock(&ucr->dev_mutex);
mutex_lock(&host->host_mutex);
@@ -955,6 +964,10 @@ static int sd_power_on(struct rtsx_usb_sdmmc *host)
struct rtsx_ucr *ucr = host->ucr;
int err;
+ if (host->ocp_stat & (MS_OCP_NOW | MS_OCP_EVER)) {
+ dev_dbg(sdmmc_dev(host), "over current\n");
+ return -EIO;
+ }
dev_dbg(sdmmc_dev(host), "%s\n", __func__);
rtsx_usb_init_cmd(ucr);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL);
@@ -981,8 +994,18 @@ static int sd_power_on(struct rtsx_usb_sdmmc *host)
usleep_range(800, 1000);
rtsx_usb_init_cmd(ucr);
+ /* Workaround for an OCP issue: after OCP, re-enabling card power could fail */
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL, POWER_MASK, POWER_ON);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, FPDCTL, SSC_POWER_MASK, SSC_POWER_DOWN);
+ err = rtsx_usb_send_cmd(ucr, MODE_C, 100);
+ if (err)
+ return err;
+ msleep(20);
+ rtsx_usb_write_register(ucr, FPDCTL, SSC_POWER_MASK, SSC_POWER_ON);
+ usleep_range(180, 200);
+ rtsx_usb_init_cmd(ucr);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL,
- POWER_MASK|LDO3318_PWR_MASK, POWER_ON|LDO_ON);
+ LDO3318_PWR_MASK, LDO_ON);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_OE,
SD_OUTPUT_EN, SD_OUTPUT_EN);
@@ -1013,36 +1036,51 @@ static int sd_power_off(struct rtsx_usb_sdmmc *host)
return sd_pull_ctl_disable_qfn24(ucr);
}
-static int sd_set_power_mode(struct rtsx_usb_sdmmc *host,
+static void sd_set_power_mode(struct rtsx_usb_sdmmc *host,
unsigned char power_mode)
{
int err;
-
- if (power_mode != MMC_POWER_OFF)
- power_mode = MMC_POWER_ON;
+ struct rtsx_ucr *ucr = host->ucr;
if (power_mode == host->power_mode)
- return 0;
+ return;
- if (power_mode == MMC_POWER_OFF) {
+ switch (power_mode) {
+ case MMC_POWER_OFF:
err = sd_power_off(host);
+ if (err)
+ dev_dbg(sdmmc_dev(host), "power-off (err = %d)\n", err);
pm_runtime_put_noidle(sdmmc_dev(host));
- } else {
+ break;
+
+ case MMC_POWER_UP:
pm_runtime_get_noresume(sdmmc_dev(host));
err = sd_power_on(host);
- }
+ if (err)
+ dev_dbg(sdmmc_dev(host), "power-on (err = %d)\n", err);
+ /* issue at least 74 clock cycles to the card */
+ rtsx_usb_write_register(ucr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN);
+ break;
- if (!err)
- host->power_mode = power_mode;
+ case MMC_POWER_ON:
+ /* stop sending the clock signals */
+ rtsx_usb_write_register(ucr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, 0x00);
+ break;
- return err;
+ case MMC_POWER_UNDEFINED:
+ break;
+
+ default:
+ break;
+ }
+
+ host->power_mode = power_mode;
}
static int sd_set_timing(struct rtsx_usb_sdmmc *host,
unsigned char timing, bool *ddr_mode)
{
struct rtsx_ucr *ucr = host->ucr;
- int err;
*ddr_mode = false;
@@ -1097,9 +1135,7 @@ static int sd_set_timing(struct rtsx_usb_sdmmc *host,
break;
}
- err = rtsx_usb_send_cmd(ucr, MODE_C, 100);
-
- return err;
+ return rtsx_usb_send_cmd(ucr, MODE_C, 100);
}
static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -1322,6 +1358,7 @@ static void rtsx_usb_init_host(struct rtsx_usb_sdmmc *host)
mmc->max_req_size = 524288;
host->power_mode = MMC_POWER_OFF;
+ host->ocp_stat = 0;
}
static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
@@ -1332,6 +1369,7 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
#ifdef RTSX_USB_USE_LEDS_CLASS
int err;
#endif
+ int ret;
ucr = usb_get_intfdata(to_usb_interface(pdev->dev.parent));
if (!ucr)
@@ -1339,7 +1377,7 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
dev_dbg(&(pdev->dev), ": Realtek USB SD/MMC controller found\n");
- mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host));
if (!mmc)
return -ENOMEM;
@@ -1368,18 +1406,25 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
INIT_WORK(&host->led_work, rtsx_usb_update_led);
#endif
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret) {
+#ifdef RTSX_USB_USE_LEDS_CLASS
+ led_classdev_unregister(&host->led);
+#endif
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
return 0;
}
-static int rtsx_usb_sdmmc_drv_remove(struct platform_device *pdev)
+static void rtsx_usb_sdmmc_drv_remove(struct platform_device *pdev)
{
struct rtsx_usb_sdmmc *host = platform_get_drvdata(pdev);
struct mmc_host *mmc;
if (!host)
- return 0;
+ return;
mmc = host->mmc;
host->host_removal = true;
@@ -1403,17 +1448,13 @@ static int rtsx_usb_sdmmc_drv_remove(struct platform_device *pdev)
led_classdev_unregister(&host->led);
#endif
- mmc_free_host(mmc);
pm_runtime_disable(&pdev->dev);
platform_set_drvdata(pdev, NULL);
dev_dbg(&(pdev->dev),
": Realtek USB SD/MMC module has been removed\n");
-
- return 0;
}
-#ifdef CONFIG_PM
static int rtsx_usb_sdmmc_runtime_suspend(struct device *dev)
{
struct rtsx_usb_sdmmc *host = dev_get_drvdata(dev);
@@ -1431,11 +1472,9 @@ static int rtsx_usb_sdmmc_runtime_resume(struct device *dev)
mmc_detect_change(host->mmc, 0);
return 0;
}
-#endif
static const struct dev_pm_ops rtsx_usb_sdmmc_dev_pm_ops = {
- SET_RUNTIME_PM_OPS(rtsx_usb_sdmmc_runtime_suspend,
- rtsx_usb_sdmmc_runtime_resume, NULL)
+ RUNTIME_PM_OPS(rtsx_usb_sdmmc_runtime_suspend, rtsx_usb_sdmmc_runtime_resume, NULL)
};
static const struct platform_device_id rtsx_usb_sdmmc_ids[] = {
@@ -1454,7 +1493,7 @@ static struct platform_driver rtsx_usb_sdmmc_driver = {
.driver = {
.name = "rtsx_usb_sdmmc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &rtsx_usb_sdmmc_dev_pm_ops,
+ .pm = pm_ptr(&rtsx_usb_sdmmc_dev_pm_ops),
},
};
module_platform_driver(rtsx_usb_sdmmc_driver);
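The rtsx_usb_sdmmc changes above latch the reader's over-current (OCP) status during card detect, refuse new requests while an event is pending, and clear the latch once the card is removed. A compact model of that flow follows; the OCP_NOW/OCP_EVER values and the helpers are illustrative only, not the rtsx_usb register interface.

/*
 * Card detect snapshots the latched over-current bits, a pending event
 * makes new requests fail, and the latch is cleared when the card is gone.
 */
#include <stdbool.h>
#include <stdio.h>

#define OCP_NOW  (1u << 0)
#define OCP_EVER (1u << 1)

static unsigned int ocp_stat;

static int get_cd(bool card_present, unsigned int hw_ocp_bits)
{
	ocp_stat = hw_ocp_bits; /* snapshot the latched OCP status */

	if (card_present)
		return 1;

	if (ocp_stat & (OCP_NOW | OCP_EVER))
		ocp_stat = 0; /* card removed: clear the latch */
	return 0;
}

static int start_request(void)
{
	if (ocp_stat & (OCP_NOW | OCP_EVER))
		return -1; /* the driver fails the request with -ENOMEDIUM */
	return 0;
}

int main(void)
{
	get_cd(true, OCP_EVER);                   /* over-current was latched */
	printf("request: %d\n", start_request()); /* refused */
	get_cd(false, OCP_EVER);                  /* card pulled, latch cleared */
	printf("request: %d\n", start_request()); /* no longer blocked by OCP */
	return 0;
}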
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
deleted file mode 100644
index 8d5929a32d34..000000000000
--- a/drivers/mmc/host/s3cmci.c
+++ /dev/null
@@ -1,1777 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/mmc/s3cmci.h - Samsung S3C MCI driver
- *
- * Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de>
- *
- * Current driver maintained by Ben Dooks and Simtec Electronics
- * Copyright (C) 2008 Simtec Electronics <ben-linux@fluff.org>
- */
-
-#include <linux/module.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/clk.h>
-#include <linux/mmc/host.h>
-#include <linux/platform_device.h>
-#include <linux/cpufreq.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/gpio/consumer.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/mmc/slot-gpio.h>
-#include <linux/platform_data/mmc-s3cmci.h>
-
-#include "s3cmci.h"
-
-#define DRIVER_NAME "s3c-mci"
-
-#define S3C2410_SDICON (0x00)
-#define S3C2410_SDIPRE (0x04)
-#define S3C2410_SDICMDARG (0x08)
-#define S3C2410_SDICMDCON (0x0C)
-#define S3C2410_SDICMDSTAT (0x10)
-#define S3C2410_SDIRSP0 (0x14)
-#define S3C2410_SDIRSP1 (0x18)
-#define S3C2410_SDIRSP2 (0x1C)
-#define S3C2410_SDIRSP3 (0x20)
-#define S3C2410_SDITIMER (0x24)
-#define S3C2410_SDIBSIZE (0x28)
-#define S3C2410_SDIDCON (0x2C)
-#define S3C2410_SDIDCNT (0x30)
-#define S3C2410_SDIDSTA (0x34)
-#define S3C2410_SDIFSTA (0x38)
-
-#define S3C2410_SDIDATA (0x3C)
-#define S3C2410_SDIIMSK (0x40)
-
-#define S3C2440_SDIDATA (0x40)
-#define S3C2440_SDIIMSK (0x3C)
-
-#define S3C2440_SDICON_SDRESET (1 << 8)
-#define S3C2410_SDICON_SDIOIRQ (1 << 3)
-#define S3C2410_SDICON_FIFORESET (1 << 1)
-#define S3C2410_SDICON_CLOCKTYPE (1 << 0)
-
-#define S3C2410_SDICMDCON_LONGRSP (1 << 10)
-#define S3C2410_SDICMDCON_WAITRSP (1 << 9)
-#define S3C2410_SDICMDCON_CMDSTART (1 << 8)
-#define S3C2410_SDICMDCON_SENDERHOST (1 << 6)
-#define S3C2410_SDICMDCON_INDEX (0x3f)
-
-#define S3C2410_SDICMDSTAT_CRCFAIL (1 << 12)
-#define S3C2410_SDICMDSTAT_CMDSENT (1 << 11)
-#define S3C2410_SDICMDSTAT_CMDTIMEOUT (1 << 10)
-#define S3C2410_SDICMDSTAT_RSPFIN (1 << 9)
-
-#define S3C2440_SDIDCON_DS_WORD (2 << 22)
-#define S3C2410_SDIDCON_TXAFTERRESP (1 << 20)
-#define S3C2410_SDIDCON_RXAFTERCMD (1 << 19)
-#define S3C2410_SDIDCON_BLOCKMODE (1 << 17)
-#define S3C2410_SDIDCON_WIDEBUS (1 << 16)
-#define S3C2410_SDIDCON_DMAEN (1 << 15)
-#define S3C2410_SDIDCON_STOP (1 << 14)
-#define S3C2440_SDIDCON_DATSTART (1 << 14)
-
-#define S3C2410_SDIDCON_XFER_RXSTART (2 << 12)
-#define S3C2410_SDIDCON_XFER_TXSTART (3 << 12)
-
-#define S3C2410_SDIDCON_BLKNUM_MASK (0xFFF)
-
-#define S3C2410_SDIDSTA_SDIOIRQDETECT (1 << 9)
-#define S3C2410_SDIDSTA_FIFOFAIL (1 << 8)
-#define S3C2410_SDIDSTA_CRCFAIL (1 << 7)
-#define S3C2410_SDIDSTA_RXCRCFAIL (1 << 6)
-#define S3C2410_SDIDSTA_DATATIMEOUT (1 << 5)
-#define S3C2410_SDIDSTA_XFERFINISH (1 << 4)
-#define S3C2410_SDIDSTA_TXDATAON (1 << 1)
-#define S3C2410_SDIDSTA_RXDATAON (1 << 0)
-
-#define S3C2440_SDIFSTA_FIFORESET (1 << 16)
-#define S3C2440_SDIFSTA_FIFOFAIL (3 << 14)
-#define S3C2410_SDIFSTA_TFDET (1 << 13)
-#define S3C2410_SDIFSTA_RFDET (1 << 12)
-#define S3C2410_SDIFSTA_COUNTMASK (0x7f)
-
-#define S3C2410_SDIIMSK_RESPONSECRC (1 << 17)
-#define S3C2410_SDIIMSK_CMDSENT (1 << 16)
-#define S3C2410_SDIIMSK_CMDTIMEOUT (1 << 15)
-#define S3C2410_SDIIMSK_RESPONSEND (1 << 14)
-#define S3C2410_SDIIMSK_SDIOIRQ (1 << 12)
-#define S3C2410_SDIIMSK_FIFOFAIL (1 << 11)
-#define S3C2410_SDIIMSK_CRCSTATUS (1 << 10)
-#define S3C2410_SDIIMSK_DATACRC (1 << 9)
-#define S3C2410_SDIIMSK_DATATIMEOUT (1 << 8)
-#define S3C2410_SDIIMSK_DATAFINISH (1 << 7)
-#define S3C2410_SDIIMSK_TXFIFOHALF (1 << 4)
-#define S3C2410_SDIIMSK_RXFIFOLAST (1 << 2)
-#define S3C2410_SDIIMSK_RXFIFOHALF (1 << 0)
-
-enum dbg_channels {
- dbg_err = (1 << 0),
- dbg_debug = (1 << 1),
- dbg_info = (1 << 2),
- dbg_irq = (1 << 3),
- dbg_sg = (1 << 4),
- dbg_dma = (1 << 5),
- dbg_pio = (1 << 6),
- dbg_fail = (1 << 7),
- dbg_conf = (1 << 8),
-};
-
-static const int dbgmap_err = dbg_fail;
-static const int dbgmap_info = dbg_info | dbg_conf;
-static const int dbgmap_debug = dbg_err | dbg_debug;
-
-#define dbg(host, channels, args...) \
- do { \
- if (dbgmap_err & channels) \
- dev_err(&host->pdev->dev, args); \
- else if (dbgmap_info & channels) \
- dev_info(&host->pdev->dev, args); \
- else if (dbgmap_debug & channels) \
- dev_dbg(&host->pdev->dev, args); \
- } while (0)
-
-static void finalize_request(struct s3cmci_host *host);
-static void s3cmci_send_request(struct mmc_host *mmc);
-static void s3cmci_reset(struct s3cmci_host *host);
-
-#ifdef CONFIG_MMC_DEBUG
-
-static void dbg_dumpregs(struct s3cmci_host *host, char *prefix)
-{
- u32 con, pre, cmdarg, cmdcon, cmdsta, r0, r1, r2, r3, timer;
- u32 datcon, datcnt, datsta, fsta;
-
- con = readl(host->base + S3C2410_SDICON);
- pre = readl(host->base + S3C2410_SDIPRE);
- cmdarg = readl(host->base + S3C2410_SDICMDARG);
- cmdcon = readl(host->base + S3C2410_SDICMDCON);
- cmdsta = readl(host->base + S3C2410_SDICMDSTAT);
- r0 = readl(host->base + S3C2410_SDIRSP0);
- r1 = readl(host->base + S3C2410_SDIRSP1);
- r2 = readl(host->base + S3C2410_SDIRSP2);
- r3 = readl(host->base + S3C2410_SDIRSP3);
- timer = readl(host->base + S3C2410_SDITIMER);
- datcon = readl(host->base + S3C2410_SDIDCON);
- datcnt = readl(host->base + S3C2410_SDIDCNT);
- datsta = readl(host->base + S3C2410_SDIDSTA);
- fsta = readl(host->base + S3C2410_SDIFSTA);
-
- dbg(host, dbg_debug, "%s CON:[%08x] PRE:[%08x] TMR:[%08x]\n",
- prefix, con, pre, timer);
-
- dbg(host, dbg_debug, "%s CCON:[%08x] CARG:[%08x] CSTA:[%08x]\n",
- prefix, cmdcon, cmdarg, cmdsta);
-
- dbg(host, dbg_debug, "%s DCON:[%08x] FSTA:[%08x]"
- " DSTA:[%08x] DCNT:[%08x]\n",
- prefix, datcon, fsta, datsta, datcnt);
-
- dbg(host, dbg_debug, "%s R0:[%08x] R1:[%08x]"
- " R2:[%08x] R3:[%08x]\n",
- prefix, r0, r1, r2, r3);
-}
-
-static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd,
- int stop)
-{
- snprintf(host->dbgmsg_cmd, 300,
- "#%u%s op:%i arg:0x%08x flags:0x08%x retries:%u",
- host->ccnt, (stop ? " (STOP)" : ""),
- cmd->opcode, cmd->arg, cmd->flags, cmd->retries);
-
- if (cmd->data) {
- snprintf(host->dbgmsg_dat, 300,
- "#%u bsize:%u blocks:%u bytes:%u",
- host->dcnt, cmd->data->blksz,
- cmd->data->blocks,
- cmd->data->blocks * cmd->data->blksz);
- } else {
- host->dbgmsg_dat[0] = '\0';
- }
-}
-
-static void dbg_dumpcmd(struct s3cmci_host *host, struct mmc_command *cmd,
- int fail)
-{
- unsigned int dbglvl = fail ? dbg_fail : dbg_debug;
-
- if (!cmd)
- return;
-
- if (cmd->error == 0) {
- dbg(host, dbglvl, "CMD[OK] %s R0:0x%08x\n",
- host->dbgmsg_cmd, cmd->resp[0]);
- } else {
- dbg(host, dbglvl, "CMD[ERR %i] %s Status:%s\n",
- cmd->error, host->dbgmsg_cmd, host->status);
- }
-
- if (!cmd->data)
- return;
-
- if (cmd->data->error == 0) {
- dbg(host, dbglvl, "DAT[OK] %s\n", host->dbgmsg_dat);
- } else {
- dbg(host, dbglvl, "DAT[ERR %i] %s DCNT:0x%08x\n",
- cmd->data->error, host->dbgmsg_dat,
- readl(host->base + S3C2410_SDIDCNT));
- }
-}
-#else
-static void dbg_dumpcmd(struct s3cmci_host *host,
- struct mmc_command *cmd, int fail) { }
-
-static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd,
- int stop) { }
-
-static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) { }
-
-#endif /* CONFIG_MMC_DEBUG */
-
-/**
- * s3cmci_host_usedma - return whether the host is using dma or pio
- * @host: The host state
- *
- * Return true if the host is using DMA to transfer data, else false
- * to use PIO mode. Will return static data depending on the driver
- * configuration.
- */
-static inline bool s3cmci_host_usedma(struct s3cmci_host *host)
-{
-#ifdef CONFIG_MMC_S3C_PIO
- return false;
-#else /* CONFIG_MMC_S3C_DMA */
- return true;
-#endif
-}
-
-static inline u32 enable_imask(struct s3cmci_host *host, u32 imask)
-{
- u32 newmask;
-
- newmask = readl(host->base + host->sdiimsk);
- newmask |= imask;
-
- writel(newmask, host->base + host->sdiimsk);
-
- return newmask;
-}
-
-static inline u32 disable_imask(struct s3cmci_host *host, u32 imask)
-{
- u32 newmask;
-
- newmask = readl(host->base + host->sdiimsk);
- newmask &= ~imask;
-
- writel(newmask, host->base + host->sdiimsk);
-
- return newmask;
-}
-
-static inline void clear_imask(struct s3cmci_host *host)
-{
- u32 mask = readl(host->base + host->sdiimsk);
-
- /* preserve the SDIO IRQ mask state */
- mask &= S3C2410_SDIIMSK_SDIOIRQ;
- writel(mask, host->base + host->sdiimsk);
-}
-
-/**
- * s3cmci_check_sdio_irq - test whether the SDIO IRQ is being signalled
- * @host: The host to check.
- *
- * Test to see if the SDIO interrupt is being signalled in case the
- * controller has failed to re-detect a card interrupt. Read GPE8 and
- * see if it is low and if so, signal a SDIO interrupt.
- *
- * This is currently called if a request is finished (we assume that the
- * bus is now idle) and when the SDIO IRQ is enabled in case the IRQ is
- * already being indicated.
-*/
-static void s3cmci_check_sdio_irq(struct s3cmci_host *host)
-{
- if (host->sdio_irqen) {
- if (host->pdata->bus[3] &&
- gpiod_get_value(host->pdata->bus[3]) == 0) {
- pr_debug("%s: signalling irq\n", __func__);
- mmc_signal_sdio_irq(host->mmc);
- }
- }
-}
-
-static inline int get_data_buffer(struct s3cmci_host *host,
- u32 *bytes, u32 **pointer)
-{
- struct scatterlist *sg;
-
- if (host->pio_active == XFER_NONE)
- return -EINVAL;
-
- if ((!host->mrq) || (!host->mrq->data))
- return -EINVAL;
-
- if (host->pio_sgptr >= host->mrq->data->sg_len) {
- dbg(host, dbg_debug, "no more buffers (%i/%i)\n",
- host->pio_sgptr, host->mrq->data->sg_len);
- return -EBUSY;
- }
- sg = &host->mrq->data->sg[host->pio_sgptr];
-
- *bytes = sg->length;
- *pointer = sg_virt(sg);
-
- host->pio_sgptr++;
-
- dbg(host, dbg_sg, "new buffer (%i/%i)\n",
- host->pio_sgptr, host->mrq->data->sg_len);
-
- return 0;
-}
-
-static inline u32 fifo_count(struct s3cmci_host *host)
-{
- u32 fifostat = readl(host->base + S3C2410_SDIFSTA);
-
- fifostat &= S3C2410_SDIFSTA_COUNTMASK;
- return fifostat;
-}
-
-static inline u32 fifo_free(struct s3cmci_host *host)
-{
- u32 fifostat = readl(host->base + S3C2410_SDIFSTA);
-
- fifostat &= S3C2410_SDIFSTA_COUNTMASK;
- return 63 - fifostat;
-}
-
-/**
- * s3cmci_enable_irq - enable IRQ, after having disabled it.
- * @host: The device state.
- * @more: True if more IRQs are expected from transfer.
- *
- * Enable the main IRQ if needed after it has been disabled.
- *
- * The IRQ can be one of the following states:
- * - disabled during IDLE
- * - disabled whilst processing data
- * - enabled during transfer
- * - enabled whilst awaiting SDIO interrupt detection
- */
-static void s3cmci_enable_irq(struct s3cmci_host *host, bool more)
-{
- unsigned long flags;
- bool enable = false;
-
- local_irq_save(flags);
-
- host->irq_enabled = more;
- host->irq_disabled = false;
-
- enable = more | host->sdio_irqen;
-
- if (host->irq_state != enable) {
- host->irq_state = enable;
-
- if (enable)
- enable_irq(host->irq);
- else
- disable_irq(host->irq);
- }
-
- local_irq_restore(flags);
-}
-
-static void s3cmci_disable_irq(struct s3cmci_host *host, bool transfer)
-{
- unsigned long flags;
-
- local_irq_save(flags);
-
- /* pr_debug("%s: transfer %d\n", __func__, transfer); */
-
- host->irq_disabled = transfer;
-
- if (transfer && host->irq_state) {
- host->irq_state = false;
- disable_irq(host->irq);
- }
-
- local_irq_restore(flags);
-}
-
-static void do_pio_read(struct s3cmci_host *host)
-{
- int res;
- u32 fifo;
- u32 *ptr;
- u32 fifo_words;
- void __iomem *from_ptr;
-
- /* restore the real prescaler; it may have been set slow to fix read timeouts */
- writel(host->prescaler, host->base + S3C2410_SDIPRE);
-
- from_ptr = host->base + host->sdidata;
-
- while ((fifo = fifo_count(host))) {
- if (!host->pio_bytes) {
- res = get_data_buffer(host, &host->pio_bytes,
- &host->pio_ptr);
- if (res) {
- host->pio_active = XFER_NONE;
- host->complete_what = COMPLETION_FINALIZE;
-
- dbg(host, dbg_pio, "pio_read(): "
- "complete (no more data).\n");
- return;
- }
-
- dbg(host, dbg_pio,
- "pio_read(): new target: [%i]@[%p]\n",
- host->pio_bytes, host->pio_ptr);
- }
-
- dbg(host, dbg_pio,
- "pio_read(): fifo:[%02i] buffer:[%03i] dcnt:[%08X]\n",
- fifo, host->pio_bytes,
- readl(host->base + S3C2410_SDIDCNT));
-
- /* If we have reached the end of the block, we can
- * read a word and get 1 to 3 bytes. If we are in the
- * middle of the block, we have to read full words,
- * otherwise we will write garbage, so round down to
- * an even multiple of 4. */
- if (fifo >= host->pio_bytes)
- fifo = host->pio_bytes;
- else
- fifo -= fifo & 3;
-
- host->pio_bytes -= fifo;
- host->pio_count += fifo;
-
- fifo_words = fifo >> 2;
- ptr = host->pio_ptr;
- while (fifo_words--)
- *ptr++ = readl(from_ptr);
- host->pio_ptr = ptr;
-
- if (fifo & 3) {
- u32 n = fifo & 3;
- u32 data = readl(from_ptr);
- u8 *p = (u8 *)host->pio_ptr;
-
- while (n--) {
- *p++ = data;
- data >>= 8;
- }
- }
- }
-
- if (!host->pio_bytes) {
- res = get_data_buffer(host, &host->pio_bytes, &host->pio_ptr);
- if (res) {
- dbg(host, dbg_pio,
- "pio_read(): complete (no more buffers).\n");
- host->pio_active = XFER_NONE;
- host->complete_what = COMPLETION_FINALIZE;
-
- return;
- }
- }
-
- enable_imask(host,
- S3C2410_SDIIMSK_RXFIFOHALF | S3C2410_SDIIMSK_RXFIFOLAST);
-}
-
-static void do_pio_write(struct s3cmci_host *host)
-{
- void __iomem *to_ptr;
- int res;
- u32 fifo;
- u32 *ptr;
-
- to_ptr = host->base + host->sdidata;
-
- while ((fifo = fifo_free(host)) > 3) {
- if (!host->pio_bytes) {
- res = get_data_buffer(host, &host->pio_bytes,
- &host->pio_ptr);
- if (res) {
- dbg(host, dbg_pio,
- "pio_write(): complete (no more data).\n");
- host->pio_active = XFER_NONE;
-
- return;
- }
-
- dbg(host, dbg_pio,
- "pio_write(): new source: [%i]@[%p]\n",
- host->pio_bytes, host->pio_ptr);
-
- }
-
- /* If we have reached the end of the block, we have to
- * write exactly the remaining number of bytes. If we are
- * in the middle of the block, we have to write full
- * words, so round down to an even multiple of 4. */
- if (fifo >= host->pio_bytes)
- fifo = host->pio_bytes;
- else
- fifo -= fifo & 3;
-
- host->pio_bytes -= fifo;
- host->pio_count += fifo;
-
- fifo = (fifo + 3) >> 2;
- ptr = host->pio_ptr;
- while (fifo--)
- writel(*ptr++, to_ptr);
- host->pio_ptr = ptr;
- }
-
- enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
-}
-
-static void pio_tasklet(struct tasklet_struct *t)
-{
- struct s3cmci_host *host = from_tasklet(host, t, pio_tasklet);
-
- s3cmci_disable_irq(host, true);
-
- if (host->pio_active == XFER_WRITE)
- do_pio_write(host);
-
- if (host->pio_active == XFER_READ)
- do_pio_read(host);
-
- if (host->complete_what == COMPLETION_FINALIZE) {
- clear_imask(host);
- if (host->pio_active != XFER_NONE) {
- dbg(host, dbg_err, "unfinished %s "
- "- pio_count:[%u] pio_bytes:[%u]\n",
- (host->pio_active == XFER_READ) ? "read" : "write",
- host->pio_count, host->pio_bytes);
-
- if (host->mrq->data)
- host->mrq->data->error = -EINVAL;
- }
-
- s3cmci_enable_irq(host, false);
- finalize_request(host);
- } else
- s3cmci_enable_irq(host, true);
-}
-
-/*
- * ISR for SDI Interface IRQ
- * Communication between driver and ISR works as follows:
- * host->mrq points to current request
- * host->complete_what Indicates when the request is considered done
- * COMPLETION_CMDSENT when the command was sent
- * COMPLETION_RSPFIN when a response was received
- * COMPLETION_XFERFINISH when the data transfer is finished
- * COMPLETION_XFERFINISH_RSPFIN both of the above.
- * host->complete_request is the completion-object the driver waits for
- *
- * 1) Driver sets up host->mrq and host->complete_what
- * 2) Driver prepares the transfer
- * 3) Driver enables interrupts
- * 4) Driver starts transfer
- * 5) Driver waits for host->complete_request
- * 6) ISR checks for request status (errors and success)
- * 7) ISR sets host->mrq->cmd->error and host->mrq->data->error
- * 8) ISR completes host->complete_request
- * 9) ISR disables interrupts
- * 10) Driver wakes up and takes care of the request
- *
- * Note: "->error"-fields are expected to be set to 0 before the request
- *       was issued by mmc.c - therefore they are only set when an error
- *       condition comes up
- */
-
-static irqreturn_t s3cmci_irq(int irq, void *dev_id)
-{
- struct s3cmci_host *host = dev_id;
- struct mmc_command *cmd;
- u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk;
- u32 mci_cclear = 0, mci_dclear;
- unsigned long iflags;
-
- mci_dsta = readl(host->base + S3C2410_SDIDSTA);
- mci_imsk = readl(host->base + host->sdiimsk);
-
- if (mci_dsta & S3C2410_SDIDSTA_SDIOIRQDETECT) {
- if (mci_imsk & S3C2410_SDIIMSK_SDIOIRQ) {
- mci_dclear = S3C2410_SDIDSTA_SDIOIRQDETECT;
- writel(mci_dclear, host->base + S3C2410_SDIDSTA);
-
- mmc_signal_sdio_irq(host->mmc);
- return IRQ_HANDLED;
- }
- }
-
- spin_lock_irqsave(&host->complete_lock, iflags);
-
- mci_csta = readl(host->base + S3C2410_SDICMDSTAT);
- mci_dcnt = readl(host->base + S3C2410_SDIDCNT);
- mci_fsta = readl(host->base + S3C2410_SDIFSTA);
- mci_dclear = 0;
-
- if ((host->complete_what == COMPLETION_NONE) ||
- (host->complete_what == COMPLETION_FINALIZE)) {
- host->status = "nothing to complete";
- clear_imask(host);
- goto irq_out;
- }
-
- if (!host->mrq) {
- host->status = "no active mrq";
- clear_imask(host);
- goto irq_out;
- }
-
- cmd = host->cmd_is_stop ? host->mrq->stop : host->mrq->cmd;
-
- if (!cmd) {
- host->status = "no active cmd";
- clear_imask(host);
- goto irq_out;
- }
-
- if (!s3cmci_host_usedma(host)) {
- if ((host->pio_active == XFER_WRITE) &&
- (mci_fsta & S3C2410_SDIFSTA_TFDET)) {
-
- disable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
- tasklet_schedule(&host->pio_tasklet);
- host->status = "pio tx";
- }
-
- if ((host->pio_active == XFER_READ) &&
- (mci_fsta & S3C2410_SDIFSTA_RFDET)) {
-
- disable_imask(host,
- S3C2410_SDIIMSK_RXFIFOHALF |
- S3C2410_SDIIMSK_RXFIFOLAST);
-
- tasklet_schedule(&host->pio_tasklet);
- host->status = "pio rx";
- }
- }
-
- if (mci_csta & S3C2410_SDICMDSTAT_CMDTIMEOUT) {
- dbg(host, dbg_err, "CMDSTAT: error CMDTIMEOUT\n");
- cmd->error = -ETIMEDOUT;
- host->status = "error: command timeout";
- goto fail_transfer;
- }
-
- if (mci_csta & S3C2410_SDICMDSTAT_CMDSENT) {
- if (host->complete_what == COMPLETION_CMDSENT) {
- host->status = "ok: command sent";
- goto close_transfer;
- }
-
- mci_cclear |= S3C2410_SDICMDSTAT_CMDSENT;
- }
-
- if (mci_csta & S3C2410_SDICMDSTAT_CRCFAIL) {
- if (cmd->flags & MMC_RSP_CRC) {
- if (host->mrq->cmd->flags & MMC_RSP_136) {
- dbg(host, dbg_irq,
- "fixup: ignore CRC fail with long rsp\n");
- } else {
- /* note, we used to fail the transfer
- * here, but it seems that this is just
- * the hardware getting it wrong.
- *
- * cmd->error = -EILSEQ;
- * host->status = "error: bad command crc";
- * goto fail_transfer;
- */
- }
- }
-
- mci_cclear |= S3C2410_SDICMDSTAT_CRCFAIL;
- }
-
- if (mci_csta & S3C2410_SDICMDSTAT_RSPFIN) {
- if (host->complete_what == COMPLETION_RSPFIN) {
- host->status = "ok: command response received";
- goto close_transfer;
- }
-
- if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN)
- host->complete_what = COMPLETION_XFERFINISH;
-
- mci_cclear |= S3C2410_SDICMDSTAT_RSPFIN;
- }
-
- /* errors handled after this point are only relevant
- when a data transfer is in progress */
-
- if (!cmd->data)
- goto clear_status_bits;
-
- /* Check for FIFO failure */
- if (host->is2440) {
- if (mci_fsta & S3C2440_SDIFSTA_FIFOFAIL) {
- dbg(host, dbg_err, "FIFO failure\n");
- host->mrq->data->error = -EILSEQ;
- host->status = "error: 2440 fifo failure";
- goto fail_transfer;
- }
- } else {
- if (mci_dsta & S3C2410_SDIDSTA_FIFOFAIL) {
- dbg(host, dbg_err, "FIFO failure\n");
- cmd->data->error = -EILSEQ;
- host->status = "error: fifo failure";
- goto fail_transfer;
- }
- }
-
- if (mci_dsta & S3C2410_SDIDSTA_RXCRCFAIL) {
- dbg(host, dbg_err, "bad data crc (outgoing)\n");
- cmd->data->error = -EILSEQ;
- host->status = "error: bad data crc (outgoing)";
- goto fail_transfer;
- }
-
- if (mci_dsta & S3C2410_SDIDSTA_CRCFAIL) {
- dbg(host, dbg_err, "bad data crc (incoming)\n");
- cmd->data->error = -EILSEQ;
- host->status = "error: bad data crc (incoming)";
- goto fail_transfer;
- }
-
- if (mci_dsta & S3C2410_SDIDSTA_DATATIMEOUT) {
- dbg(host, dbg_err, "data timeout\n");
- cmd->data->error = -ETIMEDOUT;
- host->status = "error: data timeout";
- goto fail_transfer;
- }
-
- if (mci_dsta & S3C2410_SDIDSTA_XFERFINISH) {
- if (host->complete_what == COMPLETION_XFERFINISH) {
- host->status = "ok: data transfer completed";
- goto close_transfer;
- }
-
- if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN)
- host->complete_what = COMPLETION_RSPFIN;
-
- mci_dclear |= S3C2410_SDIDSTA_XFERFINISH;
- }
-
-clear_status_bits:
- writel(mci_cclear, host->base + S3C2410_SDICMDSTAT);
- writel(mci_dclear, host->base + S3C2410_SDIDSTA);
-
- goto irq_out;
-
-fail_transfer:
- host->pio_active = XFER_NONE;
-
-close_transfer:
- host->complete_what = COMPLETION_FINALIZE;
-
- clear_imask(host);
- tasklet_schedule(&host->pio_tasklet);
-
- goto irq_out;
-
-irq_out:
- dbg(host, dbg_irq,
- "csta:0x%08x dsta:0x%08x fsta:0x%08x dcnt:0x%08x status:%s.\n",
- mci_csta, mci_dsta, mci_fsta, mci_dcnt, host->status);
-
- spin_unlock_irqrestore(&host->complete_lock, iflags);
- return IRQ_HANDLED;
-
-}
-
-static void s3cmci_dma_done_callback(void *arg)
-{
- struct s3cmci_host *host = arg;
- unsigned long iflags;
-
- BUG_ON(!host->mrq);
- BUG_ON(!host->mrq->data);
-
- spin_lock_irqsave(&host->complete_lock, iflags);
-
- dbg(host, dbg_dma, "DMA FINISHED\n");
-
- host->dma_complete = 1;
- host->complete_what = COMPLETION_FINALIZE;
-
- tasklet_schedule(&host->pio_tasklet);
- spin_unlock_irqrestore(&host->complete_lock, iflags);
-
-}
-
-static void finalize_request(struct s3cmci_host *host)
-{
- struct mmc_request *mrq = host->mrq;
- struct mmc_command *cmd;
- int debug_as_failure = 0;
-
- if (host->complete_what != COMPLETION_FINALIZE)
- return;
-
- if (!mrq)
- return;
- cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
-
- if (cmd->data && (cmd->error == 0) &&
- (cmd->data->error == 0)) {
- if (s3cmci_host_usedma(host) && (!host->dma_complete)) {
- dbg(host, dbg_dma, "DMA Missing (%d)!\n",
- host->dma_complete);
- return;
- }
- }
-
- /* Read response from controller. */
- cmd->resp[0] = readl(host->base + S3C2410_SDIRSP0);
- cmd->resp[1] = readl(host->base + S3C2410_SDIRSP1);
- cmd->resp[2] = readl(host->base + S3C2410_SDIRSP2);
- cmd->resp[3] = readl(host->base + S3C2410_SDIRSP3);
-
- writel(host->prescaler, host->base + S3C2410_SDIPRE);
-
- if (cmd->error)
- debug_as_failure = 1;
-
- if (cmd->data && cmd->data->error)
- debug_as_failure = 1;
-
- dbg_dumpcmd(host, cmd, debug_as_failure);
-
- /* Cleanup controller */
- writel(0, host->base + S3C2410_SDICMDARG);
- writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
- writel(0, host->base + S3C2410_SDICMDCON);
- clear_imask(host);
-
- if (cmd->data && cmd->error)
- cmd->data->error = cmd->error;
-
- if (cmd->data && cmd->data->stop && (!host->cmd_is_stop)) {
- host->cmd_is_stop = 1;
- s3cmci_send_request(host->mmc);
- return;
- }
-
- /* If we have no data transfer we are finished here */
- if (!mrq->data)
- goto request_done;
-
- /* Calculate the number of bytes transferred if there was no error */
- if (mrq->data->error == 0) {
- mrq->data->bytes_xfered =
- (mrq->data->blocks * mrq->data->blksz);
- } else {
- mrq->data->bytes_xfered = 0;
- }
-
- /* If we had an error while transferring data we flush the
- * DMA channel and the fifo to clear out any garbage. */
- if (mrq->data->error != 0) {
- if (s3cmci_host_usedma(host))
- dmaengine_terminate_all(host->dma);
-
- if (host->is2440) {
- /* Clear failure register and reset fifo. */
- writel(S3C2440_SDIFSTA_FIFORESET |
- S3C2440_SDIFSTA_FIFOFAIL,
- host->base + S3C2410_SDIFSTA);
- } else {
- u32 mci_con;
-
- /* reset fifo */
- mci_con = readl(host->base + S3C2410_SDICON);
- mci_con |= S3C2410_SDICON_FIFORESET;
-
- writel(mci_con, host->base + S3C2410_SDICON);
- }
- }
-
-request_done:
- host->complete_what = COMPLETION_NONE;
- host->mrq = NULL;
-
- s3cmci_check_sdio_irq(host);
- mmc_request_done(host->mmc, mrq);
-}
-
-static void s3cmci_send_command(struct s3cmci_host *host,
- struct mmc_command *cmd)
-{
- u32 ccon, imsk;
-
- imsk = S3C2410_SDIIMSK_CRCSTATUS | S3C2410_SDIIMSK_CMDTIMEOUT |
- S3C2410_SDIIMSK_RESPONSEND | S3C2410_SDIIMSK_CMDSENT |
- S3C2410_SDIIMSK_RESPONSECRC;
-
- enable_imask(host, imsk);
-
- if (cmd->data)
- host->complete_what = COMPLETION_XFERFINISH_RSPFIN;
- else if (cmd->flags & MMC_RSP_PRESENT)
- host->complete_what = COMPLETION_RSPFIN;
- else
- host->complete_what = COMPLETION_CMDSENT;
-
- writel(cmd->arg, host->base + S3C2410_SDICMDARG);
-
- ccon = cmd->opcode & S3C2410_SDICMDCON_INDEX;
- ccon |= S3C2410_SDICMDCON_SENDERHOST | S3C2410_SDICMDCON_CMDSTART;
-
- if (cmd->flags & MMC_RSP_PRESENT)
- ccon |= S3C2410_SDICMDCON_WAITRSP;
-
- if (cmd->flags & MMC_RSP_136)
- ccon |= S3C2410_SDICMDCON_LONGRSP;
-
- writel(ccon, host->base + S3C2410_SDICMDCON);
-}
-
-static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
-{
- u32 dcon, imsk, stoptries = 3;
-
- if ((data->blksz & 3) != 0) {
- /* We cannot deal with unaligned blocks with more than
- * one block being transferred. */
-
- if (data->blocks > 1) {
- pr_warn("%s: can't do non-word sized block transfers (blksz %d)\n",
- __func__, data->blksz);
- return -EINVAL;
- }
- }
-
- while (readl(host->base + S3C2410_SDIDSTA) &
- (S3C2410_SDIDSTA_TXDATAON | S3C2410_SDIDSTA_RXDATAON)) {
-
- dbg(host, dbg_err,
- "mci_setup_data() transfer stillin progress.\n");
-
- writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
- s3cmci_reset(host);
-
- if ((stoptries--) == 0) {
- dbg_dumpregs(host, "DRF");
- return -EINVAL;
- }
- }
-
- dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK;
-
- if (s3cmci_host_usedma(host))
- dcon |= S3C2410_SDIDCON_DMAEN;
-
- if (host->bus_width == MMC_BUS_WIDTH_4)
- dcon |= S3C2410_SDIDCON_WIDEBUS;
-
- dcon |= S3C2410_SDIDCON_BLOCKMODE;
-
- if (data->flags & MMC_DATA_WRITE) {
- dcon |= S3C2410_SDIDCON_TXAFTERRESP;
- dcon |= S3C2410_SDIDCON_XFER_TXSTART;
- }
-
- if (data->flags & MMC_DATA_READ) {
- dcon |= S3C2410_SDIDCON_RXAFTERCMD;
- dcon |= S3C2410_SDIDCON_XFER_RXSTART;
- }
-
- if (host->is2440) {
- dcon |= S3C2440_SDIDCON_DS_WORD;
- dcon |= S3C2440_SDIDCON_DATSTART;
- }
-
- writel(dcon, host->base + S3C2410_SDIDCON);
-
- /* write BSIZE register */
-
- writel(data->blksz, host->base + S3C2410_SDIBSIZE);
-
- /* add to IMASK register */
- imsk = S3C2410_SDIIMSK_FIFOFAIL | S3C2410_SDIIMSK_DATACRC |
- S3C2410_SDIIMSK_DATATIMEOUT | S3C2410_SDIIMSK_DATAFINISH;
-
- enable_imask(host, imsk);
-
- /* write TIMER register */
-
- if (host->is2440) {
- writel(0x007FFFFF, host->base + S3C2410_SDITIMER);
- } else {
- writel(0x0000FFFF, host->base + S3C2410_SDITIMER);
-
- /* FIX: set slow clock to prevent timeouts on read */
- if (data->flags & MMC_DATA_READ)
- writel(0xFF, host->base + S3C2410_SDIPRE);
- }
-
- return 0;
-}
-
-#define BOTH_DIR (MMC_DATA_WRITE | MMC_DATA_READ)
-
-static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data)
-{
- int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
-
- BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
-
- host->pio_sgptr = 0;
- host->pio_bytes = 0;
- host->pio_count = 0;
- host->pio_active = rw ? XFER_WRITE : XFER_READ;
-
- if (rw) {
- do_pio_write(host);
- enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
- } else {
- enable_imask(host, S3C2410_SDIIMSK_RXFIFOHALF
- | S3C2410_SDIIMSK_RXFIFOLAST);
- }
-
- return 0;
-}
-
-static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
-{
- int rw = data->flags & MMC_DATA_WRITE;
- struct dma_async_tx_descriptor *desc;
- struct dma_slave_config conf = {
- .src_addr = host->mem->start + host->sdidata,
- .dst_addr = host->mem->start + host->sdidata,
- .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
- .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
- };
-
- BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
-
- /* Restore prescaler value */
- writel(host->prescaler, host->base + S3C2410_SDIPRE);
-
- if (!rw)
- conf.direction = DMA_DEV_TO_MEM;
- else
- conf.direction = DMA_MEM_TO_DEV;
-
- dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
-
- dmaengine_slave_config(host->dma, &conf);
- desc = dmaengine_prep_slave_sg(host->dma, data->sg, data->sg_len,
- conf.direction,
- DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
- if (!desc)
- goto unmap_exit;
- desc->callback = s3cmci_dma_done_callback;
- desc->callback_param = host;
- dmaengine_submit(desc);
- dma_async_issue_pending(host->dma);
-
- return 0;
-
-unmap_exit:
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
- return -ENOMEM;
-}
-
-static void s3cmci_send_request(struct mmc_host *mmc)
-{
- struct s3cmci_host *host = mmc_priv(mmc);
- struct mmc_request *mrq = host->mrq;
- struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
-
- host->ccnt++;
- prepare_dbgmsg(host, cmd, host->cmd_is_stop);
-
- /* Clear command, data and fifo status registers
- Fifo clear only necessary on 2440, but doesn't hurt on 2410
- */
- writel(0xFFFFFFFF, host->base + S3C2410_SDICMDSTAT);
- writel(0xFFFFFFFF, host->base + S3C2410_SDIDSTA);
- writel(0xFFFFFFFF, host->base + S3C2410_SDIFSTA);
-
- if (cmd->data) {
- int res = s3cmci_setup_data(host, cmd->data);
-
- host->dcnt++;
-
- if (res) {
- dbg(host, dbg_err, "setup data error %d\n", res);
- cmd->error = res;
- cmd->data->error = res;
-
- mmc_request_done(mmc, mrq);
- return;
- }
-
- if (s3cmci_host_usedma(host))
- res = s3cmci_prepare_dma(host, cmd->data);
- else
- res = s3cmci_prepare_pio(host, cmd->data);
-
- if (res) {
- dbg(host, dbg_err, "data prepare error %d\n", res);
- cmd->error = res;
- cmd->data->error = res;
-
- mmc_request_done(mmc, mrq);
- return;
- }
- }
-
- /* Send command */
- s3cmci_send_command(host, cmd);
-
- /* Enable Interrupt */
- s3cmci_enable_irq(host, true);
-}
-
-static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
-{
- struct s3cmci_host *host = mmc_priv(mmc);
-
- host->status = "mmc request";
- host->cmd_is_stop = 0;
- host->mrq = mrq;
-
- if (mmc_gpio_get_cd(mmc) == 0) {
- dbg(host, dbg_err, "%s: no medium present\n", __func__);
- host->mrq->cmd->error = -ENOMEDIUM;
- mmc_request_done(mmc, mrq);
- } else
- s3cmci_send_request(mmc);
-}
-
-static void s3cmci_set_clk(struct s3cmci_host *host, struct mmc_ios *ios)
-{
- u32 mci_psc;
-
- /* Set clock */
- for (mci_psc = 0; mci_psc < 255; mci_psc++) {
- host->real_rate = host->clk_rate / (host->clk_div*(mci_psc+1));
-
- if (host->real_rate <= ios->clock)
- break;
- }
-
- if (mci_psc > 255)
- mci_psc = 255;
-
- host->prescaler = mci_psc;
- writel(host->prescaler, host->base + S3C2410_SDIPRE);
-
- /* If requested clock is 0, real_rate will be 0, too */
- if (ios->clock == 0)
- host->real_rate = 0;
-}
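To make the removed prescaler search concrete (the numbers are purely illustrative): with a 50 MHz controller clock, clk_div = 2 and a requested 400 kHz card clock, the loop stops at the first divisor that satisfies real_rate <= ios->clock:

    real_rate = 50 MHz / (2 * (psc + 1)) <= 400 kHz  =>  psc = 62
    real_rate = 50 MHz / (2 * 63) ≈ 396.8 kHz

so, within the divider's range, the card ends up at or slightly below the requested frequency.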
-
-static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
-{
- struct s3cmci_host *host = mmc_priv(mmc);
- u32 mci_con;
-
- /* Set the power state */
-
- mci_con = readl(host->base + S3C2410_SDICON);
-
- switch (ios->power_mode) {
- case MMC_POWER_ON:
- case MMC_POWER_UP:
- if (!host->is2440)
- mci_con |= S3C2410_SDICON_FIFORESET;
- break;
-
- case MMC_POWER_OFF:
- default:
- if (host->is2440)
- mci_con |= S3C2440_SDICON_SDRESET;
- break;
- }
-
- if (host->pdata->set_power)
- host->pdata->set_power(ios->power_mode, ios->vdd);
-
- s3cmci_set_clk(host, ios);
-
- /* Set CLOCK_ENABLE */
- if (ios->clock)
- mci_con |= S3C2410_SDICON_CLOCKTYPE;
- else
- mci_con &= ~S3C2410_SDICON_CLOCKTYPE;
-
- writel(mci_con, host->base + S3C2410_SDICON);
-
- if ((ios->power_mode == MMC_POWER_ON) ||
- (ios->power_mode == MMC_POWER_UP)) {
- dbg(host, dbg_conf, "running at %lukHz (requested: %ukHz).\n",
- host->real_rate/1000, ios->clock/1000);
- } else {
- dbg(host, dbg_conf, "powered down.\n");
- }
-
- host->bus_width = ios->bus_width;
-}
-
-static void s3cmci_reset(struct s3cmci_host *host)
-{
- u32 con = readl(host->base + S3C2410_SDICON);
-
- con |= S3C2440_SDICON_SDRESET;
- writel(con, host->base + S3C2410_SDICON);
-}
-
-static void s3cmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
-{
- struct s3cmci_host *host = mmc_priv(mmc);
- unsigned long flags;
- u32 con;
-
- local_irq_save(flags);
-
- con = readl(host->base + S3C2410_SDICON);
- host->sdio_irqen = enable;
-
- if (enable == host->sdio_irqen)
- goto same_state;
-
- if (enable) {
- con |= S3C2410_SDICON_SDIOIRQ;
- enable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
-
- if (!host->irq_state && !host->irq_disabled) {
- host->irq_state = true;
- enable_irq(host->irq);
- }
- } else {
- disable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
- con &= ~S3C2410_SDICON_SDIOIRQ;
-
- if (!host->irq_enabled && host->irq_state) {
- disable_irq_nosync(host->irq);
- host->irq_state = false;
- }
- }
-
- writel(con, host->base + S3C2410_SDICON);
-
- same_state:
- local_irq_restore(flags);
-
- s3cmci_check_sdio_irq(host);
-}
-
-static const struct mmc_host_ops s3cmci_ops = {
- .request = s3cmci_request,
- .set_ios = s3cmci_set_ios,
- .get_ro = mmc_gpio_get_ro,
- .get_cd = mmc_gpio_get_cd,
- .enable_sdio_irq = s3cmci_enable_sdio_irq,
-};
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
-
-static int s3cmci_cpufreq_transition(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct s3cmci_host *host;
- struct mmc_host *mmc;
- unsigned long newclk;
- unsigned long flags;
-
- host = container_of(nb, struct s3cmci_host, freq_transition);
- newclk = clk_get_rate(host->clk);
- mmc = host->mmc;
-
- if ((val == CPUFREQ_PRECHANGE && newclk > host->clk_rate) ||
- (val == CPUFREQ_POSTCHANGE && newclk < host->clk_rate)) {
- spin_lock_irqsave(&mmc->lock, flags);
-
- host->clk_rate = newclk;
-
- if (mmc->ios.power_mode != MMC_POWER_OFF &&
- mmc->ios.clock != 0)
- s3cmci_set_clk(host, &mmc->ios);
-
- spin_unlock_irqrestore(&mmc->lock, flags);
- }
-
- return 0;
-}
-
-static inline int s3cmci_cpufreq_register(struct s3cmci_host *host)
-{
- host->freq_transition.notifier_call = s3cmci_cpufreq_transition;
-
- return cpufreq_register_notifier(&host->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host)
-{
- cpufreq_unregister_notifier(&host->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-#else
-static inline int s3cmci_cpufreq_register(struct s3cmci_host *host)
-{
- return 0;
-}
-
-static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host)
-{
-}
-#endif
-
-
-#ifdef CONFIG_DEBUG_FS
-
-static int s3cmci_state_show(struct seq_file *seq, void *v)
-{
- struct s3cmci_host *host = seq->private;
-
- seq_printf(seq, "Register base = 0x%p\n", host->base);
- seq_printf(seq, "Clock rate = %ld\n", host->clk_rate);
- seq_printf(seq, "Prescale = %d\n", host->prescaler);
- seq_printf(seq, "is2440 = %d\n", host->is2440);
- seq_printf(seq, "IRQ = %d\n", host->irq);
- seq_printf(seq, "IRQ enabled = %d\n", host->irq_enabled);
- seq_printf(seq, "IRQ disabled = %d\n", host->irq_disabled);
- seq_printf(seq, "IRQ state = %d\n", host->irq_state);
- seq_printf(seq, "CD IRQ = %d\n", host->irq_cd);
- seq_printf(seq, "Do DMA = %d\n", s3cmci_host_usedma(host));
- seq_printf(seq, "SDIIMSK at %d\n", host->sdiimsk);
- seq_printf(seq, "SDIDATA at %d\n", host->sdidata);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(s3cmci_state);
-
-#define DBG_REG(_r) { .addr = S3C2410_SDI##_r, .name = #_r }
-
-struct s3cmci_reg {
- unsigned short addr;
- unsigned char *name;
-};
-
-static const struct s3cmci_reg debug_regs[] = {
- DBG_REG(CON),
- DBG_REG(PRE),
- DBG_REG(CMDARG),
- DBG_REG(CMDCON),
- DBG_REG(CMDSTAT),
- DBG_REG(RSP0),
- DBG_REG(RSP1),
- DBG_REG(RSP2),
- DBG_REG(RSP3),
- DBG_REG(TIMER),
- DBG_REG(BSIZE),
- DBG_REG(DCON),
- DBG_REG(DCNT),
- DBG_REG(DSTA),
- DBG_REG(FSTA),
- {}
-};
-
-static int s3cmci_regs_show(struct seq_file *seq, void *v)
-{
- struct s3cmci_host *host = seq->private;
- const struct s3cmci_reg *rptr = debug_regs;
-
- for (; rptr->name; rptr++)
- seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
- readl(host->base + rptr->addr));
-
- seq_printf(seq, "SDIIMSK\t=0x%08x\n", readl(host->base + host->sdiimsk));
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(s3cmci_regs);
-
-static void s3cmci_debugfs_attach(struct s3cmci_host *host)
-{
- struct device *dev = &host->pdev->dev;
- struct dentry *root;
-
- root = debugfs_create_dir(dev_name(dev), NULL);
- host->debug_root = root;
-
- debugfs_create_file("state", 0444, root, host, &s3cmci_state_fops);
- debugfs_create_file("regs", 0444, root, host, &s3cmci_regs_fops);
-}
-
-static void s3cmci_debugfs_remove(struct s3cmci_host *host)
-{
- debugfs_remove_recursive(host->debug_root);
-}
-
-#else
-static inline void s3cmci_debugfs_attach(struct s3cmci_host *host) { }
-static inline void s3cmci_debugfs_remove(struct s3cmci_host *host) { }
-
-#endif /* CONFIG_DEBUG_FS */
-
-static int s3cmci_probe_pdata(struct s3cmci_host *host)
-{
- struct platform_device *pdev = host->pdev;
- struct mmc_host *mmc = host->mmc;
- struct s3c24xx_mci_pdata *pdata;
- int i, ret;
-
- host->is2440 = platform_get_device_id(pdev)->driver_data;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_err(&pdev->dev, "need platform data");
- return -ENXIO;
- }
-
- for (i = 0; i < 6; i++) {
- pdata->bus[i] = devm_gpiod_get_index(&pdev->dev, "bus", i,
- GPIOD_OUT_LOW);
- if (IS_ERR(pdata->bus[i])) {
- dev_err(&pdev->dev, "failed to get gpio %d\n", i);
- return PTR_ERR(pdata->bus[i]);
- }
- }
-
- if (pdata->no_wprotect)
- mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
-
- if (pdata->no_detect)
- mmc->caps |= MMC_CAP_NEEDS_POLL;
-
- if (pdata->wprotect_invert)
- mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
-
- /* If we get -ENOENT we have no card detect GPIO line */
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
- if (ret != -ENOENT) {
- dev_err(&pdev->dev, "error requesting GPIO for CD %d\n",
- ret);
- return ret;
- }
-
- ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0);
- if (ret != -ENOENT) {
- dev_err(&pdev->dev, "error requesting GPIO for WP %d\n",
- ret);
- return ret;
- }
-
- return 0;
-}
-
-static int s3cmci_probe_dt(struct s3cmci_host *host)
-{
- struct platform_device *pdev = host->pdev;
- struct s3c24xx_mci_pdata *pdata;
- struct mmc_host *mmc = host->mmc;
- int ret;
-
- host->is2440 = (long) of_device_get_match_data(&pdev->dev);
-
- ret = mmc_of_parse(mmc);
- if (ret)
- return ret;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- pdev->dev.platform_data = pdata;
-
- return 0;
-}
-
-static int s3cmci_probe(struct platform_device *pdev)
-{
- struct s3cmci_host *host;
- struct mmc_host *mmc;
- int ret;
-
- mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
- if (!mmc) {
- ret = -ENOMEM;
- goto probe_out;
- }
-
- host = mmc_priv(mmc);
- host->mmc = mmc;
- host->pdev = pdev;
-
- if (pdev->dev.of_node)
- ret = s3cmci_probe_dt(host);
- else
- ret = s3cmci_probe_pdata(host);
-
- if (ret)
- goto probe_free_host;
-
- host->pdata = pdev->dev.platform_data;
-
- spin_lock_init(&host->complete_lock);
- tasklet_setup(&host->pio_tasklet, pio_tasklet);
-
- if (host->is2440) {
- host->sdiimsk = S3C2440_SDIIMSK;
- host->sdidata = S3C2440_SDIDATA;
- host->clk_div = 1;
- } else {
- host->sdiimsk = S3C2410_SDIIMSK;
- host->sdidata = S3C2410_SDIDATA;
- host->clk_div = 2;
- }
-
- host->complete_what = COMPLETION_NONE;
- host->pio_active = XFER_NONE;
-
- host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!host->mem) {
- dev_err(&pdev->dev,
- "failed to get io memory region resource.\n");
-
- ret = -ENOENT;
- goto probe_free_host;
- }
-
- host->mem = request_mem_region(host->mem->start,
- resource_size(host->mem), pdev->name);
-
- if (!host->mem) {
- dev_err(&pdev->dev, "failed to request io memory region.\n");
- ret = -ENOENT;
- goto probe_free_host;
- }
-
- host->base = ioremap(host->mem->start, resource_size(host->mem));
- if (!host->base) {
- dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
- ret = -EINVAL;
- goto probe_free_mem_region;
- }
-
- host->irq = platform_get_irq(pdev, 0);
- if (host->irq <= 0) {
- ret = -EINVAL;
- goto probe_iounmap;
- }
-
- if (request_irq(host->irq, s3cmci_irq, IRQF_NO_AUTOEN, DRIVER_NAME, host)) {
- dev_err(&pdev->dev, "failed to request mci interrupt.\n");
- ret = -ENOENT;
- goto probe_iounmap;
- }
-
- host->irq_state = false;
-
- /* Depending on the dma state, get a DMA channel to use. */
-
- if (s3cmci_host_usedma(host)) {
- host->dma = dma_request_chan(&pdev->dev, "rx-tx");
- ret = PTR_ERR_OR_ZERO(host->dma);
- if (ret) {
- dev_err(&pdev->dev, "cannot get DMA channel.\n");
- goto probe_free_irq;
- }
- }
-
- host->clk = clk_get(&pdev->dev, "sdi");
- if (IS_ERR(host->clk)) {
- dev_err(&pdev->dev, "failed to find clock source.\n");
- ret = PTR_ERR(host->clk);
- host->clk = NULL;
- goto probe_free_dma;
- }
-
- ret = clk_prepare_enable(host->clk);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable clock source.\n");
- goto clk_free;
- }
-
- host->clk_rate = clk_get_rate(host->clk);
-
- mmc->ops = &s3cmci_ops;
- mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
-#ifdef CONFIG_MMC_S3C_HW_SDIO_IRQ
- mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
-#else
- mmc->caps = MMC_CAP_4_BIT_DATA;
-#endif
- mmc->f_min = host->clk_rate / (host->clk_div * 256);
- mmc->f_max = host->clk_rate / host->clk_div;
-
- if (host->pdata->ocr_avail)
- mmc->ocr_avail = host->pdata->ocr_avail;
-
- mmc->max_blk_count = 4095;
- mmc->max_blk_size = 4095;
- mmc->max_req_size = 4095 * 512;
- mmc->max_seg_size = mmc->max_req_size;
-
- mmc->max_segs = 128;
-
- dbg(host, dbg_debug,
- "probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%p.\n",
- (host->is2440?"2440":""),
- host->base, host->irq, host->irq_cd, host->dma);
-
- ret = s3cmci_cpufreq_register(host);
- if (ret) {
- dev_err(&pdev->dev, "failed to register cpufreq\n");
- goto free_dmabuf;
- }
-
- ret = mmc_add_host(mmc);
- if (ret) {
- dev_err(&pdev->dev, "failed to add mmc host.\n");
- goto free_cpufreq;
- }
-
- s3cmci_debugfs_attach(host);
-
- platform_set_drvdata(pdev, mmc);
- dev_info(&pdev->dev, "%s - using %s, %s SDIO IRQ\n", mmc_hostname(mmc),
- s3cmci_host_usedma(host) ? "dma" : "pio",
- mmc->caps & MMC_CAP_SDIO_IRQ ? "hw" : "sw");
-
- return 0;
-
- free_cpufreq:
- s3cmci_cpufreq_deregister(host);
-
- free_dmabuf:
- clk_disable_unprepare(host->clk);
-
- clk_free:
- clk_put(host->clk);
-
- probe_free_dma:
- if (s3cmci_host_usedma(host))
- dma_release_channel(host->dma);
-
- probe_free_irq:
- free_irq(host->irq, host);
-
- probe_iounmap:
- iounmap(host->base);
-
- probe_free_mem_region:
- release_mem_region(host->mem->start, resource_size(host->mem));
-
- probe_free_host:
- mmc_free_host(mmc);
-
- probe_out:
- return ret;
-}
-
-static void s3cmci_shutdown(struct platform_device *pdev)
-{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- struct s3cmci_host *host = mmc_priv(mmc);
-
- if (host->irq_cd >= 0)
- free_irq(host->irq_cd, host);
-
- s3cmci_debugfs_remove(host);
- s3cmci_cpufreq_deregister(host);
- mmc_remove_host(mmc);
- clk_disable_unprepare(host->clk);
-}
-
-static int s3cmci_remove(struct platform_device *pdev)
-{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- struct s3cmci_host *host = mmc_priv(mmc);
-
- s3cmci_shutdown(pdev);
-
- clk_put(host->clk);
-
- tasklet_disable(&host->pio_tasklet);
-
- if (s3cmci_host_usedma(host))
- dma_release_channel(host->dma);
-
- free_irq(host->irq, host);
-
- iounmap(host->base);
- release_mem_region(host->mem->start, resource_size(host->mem));
-
- mmc_free_host(mmc);
- return 0;
-}
-
-static const struct of_device_id s3cmci_dt_match[] = {
- {
- .compatible = "samsung,s3c2410-sdi",
- .data = (void *)0,
- },
- {
- .compatible = "samsung,s3c2412-sdi",
- .data = (void *)1,
- },
- {
- .compatible = "samsung,s3c2440-sdi",
- .data = (void *)1,
- },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, s3cmci_dt_match);
-
-static const struct platform_device_id s3cmci_driver_ids[] = {
- {
- .name = "s3c2410-sdi",
- .driver_data = 0,
- }, {
- .name = "s3c2412-sdi",
- .driver_data = 1,
- }, {
- .name = "s3c2440-sdi",
- .driver_data = 1,
- },
- { }
-};
-
-MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
-
-static struct platform_driver s3cmci_driver = {
- .driver = {
- .name = "s3c-sdi",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .of_match_table = s3cmci_dt_match,
- },
- .id_table = s3cmci_driver_ids,
- .probe = s3cmci_probe,
- .remove = s3cmci_remove,
- .shutdown = s3cmci_shutdown,
-};
-
-module_platform_driver(s3cmci_driver);
-
-MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>, Ben Dooks <ben-linux@fluff.org>");
diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h
deleted file mode 100644
index 8b65d7ad9f97..000000000000
--- a/drivers/mmc/host/s3cmci.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/drivers/mmc/s3cmci.h - Samsung S3C MCI driver
- *
- * Copyright (C) 2004-2006 Thomas Kleffel, All Rights Reserved.
- */
-
-enum s3cmci_waitfor {
- COMPLETION_NONE,
- COMPLETION_FINALIZE,
- COMPLETION_CMDSENT,
- COMPLETION_RSPFIN,
- COMPLETION_XFERFINISH,
- COMPLETION_XFERFINISH_RSPFIN,
-};
-
-struct s3cmci_host {
- struct platform_device *pdev;
- struct s3c24xx_mci_pdata *pdata;
- struct mmc_host *mmc;
- struct resource *mem;
- struct clk *clk;
- void __iomem *base;
- int irq;
- int irq_cd;
- struct dma_chan *dma;
-
- unsigned long clk_rate;
- unsigned long clk_div;
- unsigned long real_rate;
- u8 prescaler;
-
- int is2440;
- unsigned sdiimsk;
- unsigned sdidata;
-
- bool irq_disabled;
- bool irq_enabled;
- bool irq_state;
- int sdio_irqen;
-
- struct mmc_request *mrq;
- int cmd_is_stop;
-
- spinlock_t complete_lock;
- enum s3cmci_waitfor complete_what;
-
- int dma_complete;
-
- u32 pio_sgptr;
- u32 pio_bytes;
- u32 pio_count;
- u32 *pio_ptr;
-#define XFER_NONE 0
-#define XFER_READ 1
-#define XFER_WRITE 2
- u32 pio_active;
-
- int bus_width;
-
- char dbgmsg_cmd[301];
- char dbgmsg_dat[301];
- char *status;
-
- unsigned int ccnt, dcnt;
- struct tasklet_struct pio_tasklet;
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debug_root;
-#endif
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
- struct notifier_block freq_transition;
-#endif
-};
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 8fe65f172a61..84c7054607fc 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -10,6 +10,7 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>
@@ -31,10 +32,8 @@
#include <linux/mmc/slot-gpio.h>
#ifdef CONFIG_X86
-#include <asm/cpu_device_id.h>
-#include <asm/intel-family.h>
+#include <linux/platform_data/x86/soc.h>
#include <asm/iosf_mbi.h>
-#include <linux/pci.h>
#endif
#include "sdhci.h"
@@ -82,6 +81,8 @@ struct sdhci_acpi_host {
enum {
DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP = BIT(0),
DMI_QUIRK_SD_NO_WRITE_PROTECT = BIT(1),
+ DMI_QUIRK_SD_CD_ACTIVE_HIGH = BIT(2),
+ DMI_QUIRK_SD_CD_ENABLE_PULL_UP = BIT(3),
};
static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
@@ -240,26 +241,6 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
#ifdef CONFIG_X86
-static bool sdhci_acpi_byt(void)
-{
- static const struct x86_cpu_id byt[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
- {}
- };
-
- return x86_match_cpu(byt);
-}
-
-static bool sdhci_acpi_cht(void)
-{
- static const struct x86_cpu_id cht[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
- {}
- };
-
- return x86_match_cpu(cht);
-}
-
#define BYT_IOSF_SCCEP 0x63
#define BYT_IOSF_OCP_NETCTRL0 0x1078
#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
@@ -268,7 +249,7 @@ static void sdhci_acpi_byt_setting(struct device *dev)
{
u32 val = 0;
- if (!sdhci_acpi_byt())
+ if (!soc_intel_is_byt())
return;
if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
@@ -293,7 +274,7 @@ static void sdhci_acpi_byt_setting(struct device *dev)
static bool sdhci_acpi_byt_defer(struct device *dev)
{
- if (!sdhci_acpi_byt())
+ if (!soc_intel_is_byt())
return false;
if (!iosf_mbi_available())
@@ -304,43 +285,6 @@ static bool sdhci_acpi_byt_defer(struct device *dev)
return false;
}
-static bool sdhci_acpi_cht_pci_wifi(unsigned int vendor, unsigned int device,
- unsigned int slot, unsigned int parent_slot)
-{
- struct pci_dev *dev, *parent, *from = NULL;
-
- while (1) {
- dev = pci_get_device(vendor, device, from);
- pci_dev_put(from);
- if (!dev)
- break;
- parent = pci_upstream_bridge(dev);
- if (ACPI_COMPANION(&dev->dev) && PCI_SLOT(dev->devfn) == slot &&
- parent && PCI_SLOT(parent->devfn) == parent_slot &&
- !pci_upstream_bridge(parent)) {
- pci_dev_put(dev);
- return true;
- }
- from = dev;
- }
-
- return false;
-}
-
-/*
- * GPDwin uses PCI wifi which conflicts with SDIO's use of
- * acpi_device_fix_up_power() on child device nodes. Identifying GPDwin is
- * problematic, but since SDIO is only used for wifi, the presence of the PCI
- * wifi card in the expected slot with an ACPI companion node, is used to
- * indicate that acpi_device_fix_up_power() should be avoided.
- */
-static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
-{
- return sdhci_acpi_cht() &&
- acpi_dev_hid_uid_match(adev, "80860F14", "2") &&
- sdhci_acpi_cht_pci_wifi(0x14e4, 0x43ec, 0, 28);
-}
-
#else
static inline void sdhci_acpi_byt_setting(struct device *dev)
@@ -352,33 +296,16 @@ static inline bool sdhci_acpi_byt_defer(struct device *dev)
return false;
}
-static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
-{
- return false;
-}
-
#endif
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
- struct sdhci_host *host = mmc_priv(mmc);
- unsigned long flags;
- int ret = 0;
if (!gpio_cd)
return 0;
- spin_lock_irqsave(&host->lock, flags);
-
- if (host->flags & SDHCI_DEVICE_DEAD)
- goto out;
-
- ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
-out:
- spin_unlock_irqrestore(&host->lock, flags);
-
- return ret;
+ return sdhci_get_cd_nogpio(mmc);
}
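bxt_get_cd() now delegates to a common helper rather than open-coding the present-state read. Judging from the code removed above, the helper is expected to do roughly the following (a sketch of the equivalent logic, not the actual sdhci.c implementation):

/* rough equivalent of the removed open-coded body */
static int foo_get_cd_nogpio(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&host->lock, flags);
	if (!(host->flags & SDHCI_DEVICE_DEAD))
		ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
			 SDHCI_CARD_PRESENT);
	spin_unlock_irqrestore(&host->lock, flags);

	return ret;
}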
static int intel_probe_slot(struct platform_device *pdev, struct acpi_device *adev)
@@ -724,10 +651,10 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
* in reading a garbage value and using the wrong presets.
*
* Since HS400 and HS200 presets must be identical, we could
- * instead use the the SDR104 preset register.
+ * instead use the SDR104 preset register.
*
* If the above issues are resolved we could remove this quirk for
- * firmware that that has valid presets (i.e., SDR12 <= 12 MHz).
+ * firmware that has valid presets (i.e., SDR12 <= 12 MHz).
*/
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
@@ -795,9 +722,30 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
+/* Please keep this list sorted alphabetically */
static const struct dmi_system_id sdhci_acpi_quirks[] = {
{
/*
+ * The Acer Aspire Switch 10 (SW5-012) microSD slot always
+ * reports the card being write-protected even though microSD
+ * cards do not have a write-protect switch at all.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
+ },
+ .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+ },
+ {
+ /* Asus T100TA, needs pull-up for cd but DSDT GpioInt has NoPull set */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+ },
+ .driver_data = (void *)DMI_QUIRK_SD_CD_ENABLE_PULL_UP,
+ },
+ {
+ /*
* The Lenovo Miix 320-10ICR has a bug in the _PS0 method of
* the SHC1 ACPI device, this bug causes it to reprogram the
* wrong LDO (DLDO3) to 1.8V if 1.8V modes are used and the
@@ -812,15 +760,23 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = {
},
{
/*
- * The Acer Aspire Switch 10 (SW5-012) microSD slot always
- * reports the card being write-protected even though microSD
- * cards do not have a write-protect switch at all.
+ * The Lenovo Yoga Tablet 2 Pro 1380F/L (13" Android version)
+ * has broken WP reporting and an inverted CD signal.
+ * Note this has more or less the same BIOS as the Lenovo Yoga
+ * Tablet 2 830F/L or 1050F/L (8" and 10" Android), but unlike
+ * the 830 / 1050 models which share the same mainboard this
+ * model has a different mainboard and the inverted CD and
+ * broken WP are unique to this board.
*/
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
+ /* Full match so as to NOT match the 830/1050 BIOS */
+ DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21.X64.0005.R00.1504101516"),
},
- .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+ .driver_data = (void *)(DMI_QUIRK_SD_NO_WRITE_PROTECT |
+ DMI_QUIRK_SD_CD_ACTIVE_HIGH),
},
{
/*
@@ -833,6 +789,17 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = {
},
.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
},
+ {
+ /*
+ * The Toshiba WT10-A's microSD slot always reports the card being
+ * write-protected.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT10-A"),
+ },
+ .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+ },
{} /* Terminating entry */
};
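Each entry in this table pairs a DMI board match with a quirk bitmask that the probe path later turns into host behaviour, e.g. MMC_CAP2_CD_ACTIVE_HIGH for an inverted card-detect line or a pull-up applied via mmc_gpiod_set_cd_config(). A purely hypothetical entry combining two of the flags would follow the same shape:

	{
		/* hypothetical board with inverted CD that also needs a pull-up */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Tablet"),
		},
		.driver_data = (void *)(DMI_QUIRK_SD_CD_ACTIVE_HIGH |
					DMI_QUIRK_SD_CD_ENABLE_PULL_UP),
	},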
@@ -851,12 +818,10 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct sdhci_acpi_slot *slot;
- struct acpi_device *device, *child;
const struct dmi_system_id *id;
+ struct acpi_device *device;
struct sdhci_acpi_host *c;
struct sdhci_host *host;
- struct resource *iomem;
- resource_size_t len;
size_t priv_size;
int quirks = 0;
int err;
@@ -872,27 +837,11 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
slot = sdhci_acpi_get_slot(device);
/* Power on the SDHCI controller and its children */
- acpi_device_fix_up_power(device);
- if (!sdhci_acpi_no_fixup_child_power(device)) {
- list_for_each_entry(child, &device->children, node)
- if (child->status.present && child->status.enabled)
- acpi_device_fix_up_power(child);
- }
+ acpi_device_fix_up_power_extended(device);
if (sdhci_acpi_byt_defer(dev))
return -EPROBE_DEFER;
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iomem)
- return -ENOMEM;
-
- len = resource_size(iomem);
- if (len < 0x100)
- dev_err(dev, "Invalid iomem size!\n");
-
- if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev)))
- return -ENOMEM;
-
priv_size = slot ? slot->priv_size : 0;
host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host) + priv_size);
if (IS_ERR(host))
@@ -910,14 +859,13 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
host->ops = &sdhci_acpi_ops_dflt;
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
- err = -EINVAL;
+ err = host->irq;
goto err_free;
}
- host->ioaddr = devm_ioremap(dev, iomem->start,
- resource_size(iomem));
- if (host->ioaddr == NULL) {
- err = -ENOMEM;
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->ioaddr)) {
+ err = PTR_ERR(host->ioaddr);
goto err_free;
}
@@ -947,12 +895,18 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
+ if (quirks & DMI_QUIRK_SD_CD_ACTIVE_HIGH)
+ host->mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+
err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0);
if (err) {
if (err == -EPROBE_DEFER)
goto err_free;
dev_warn(dev, "failed to setup card detect gpio\n");
c->use_runtime_pm = false;
+ } else if (quirks & DMI_QUIRK_SD_CD_ENABLE_PULL_UP) {
+ mmc_gpiod_set_cd_config(host->mmc,
+ PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 20000));
}
if (quirks & DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP)
@@ -994,11 +948,10 @@ err_free:
if (c->slot && c->slot->free_slot)
c->slot->free_slot(pdev);
- sdhci_free_host(c->host);
return err;
}
-static int sdhci_acpi_remove(struct platform_device *pdev)
+static void sdhci_acpi_remove(struct platform_device *pdev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -1018,14 +971,9 @@ static int sdhci_acpi_remove(struct platform_device *pdev)
if (c->slot && c->slot->free_slot)
c->slot->free_slot(pdev);
-
- sdhci_free_host(c->host);
-
- return 0;
}
-static void __maybe_unused sdhci_acpi_reset_signal_voltage_if_needed(
- struct device *dev)
+static void sdhci_acpi_reset_signal_voltage_if_needed(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
struct sdhci_host *host = c->host;
@@ -1040,8 +988,6 @@ static void __maybe_unused sdhci_acpi_reset_signal_voltage_if_needed(
}
}
-#ifdef CONFIG_PM_SLEEP
-
static int sdhci_acpi_suspend(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
@@ -1068,22 +1014,15 @@ static int sdhci_acpi_resume(struct device *dev)
return sdhci_resume_host(c->host);
}
-#endif
-
-#ifdef CONFIG_PM
-
static int sdhci_acpi_runtime_suspend(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
struct sdhci_host *host = c->host;
- int ret;
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
- ret = sdhci_runtime_suspend_host(host);
- if (ret)
- return ret;
+ sdhci_runtime_suspend_host(host);
sdhci_acpi_reset_signal_voltage_if_needed(dev);
return 0;
@@ -1095,15 +1034,13 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
sdhci_acpi_byt_setting(&c->pdev->dev);
- return sdhci_runtime_resume_host(c->host, 0);
+ sdhci_runtime_resume_host(c->host, 0);
+ return 0;
}
-#endif
-
static const struct dev_pm_ops sdhci_acpi_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(sdhci_acpi_suspend, sdhci_acpi_resume)
- SET_RUNTIME_PM_OPS(sdhci_acpi_runtime_suspend,
- sdhci_acpi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(sdhci_acpi_suspend, sdhci_acpi_resume)
+ RUNTIME_PM_OPS(sdhci_acpi_runtime_suspend, sdhci_acpi_runtime_resume, NULL)
};
static struct platform_driver sdhci_acpi_driver = {
@@ -1111,10 +1048,10 @@ static struct platform_driver sdhci_acpi_driver = {
.name = "sdhci-acpi",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.acpi_match_table = sdhci_acpi_ids,
- .pm = &sdhci_acpi_pm_ops,
+ .pm = pm_ptr(&sdhci_acpi_pm_ops),
},
.probe = sdhci_acpi_probe,
- .remove = sdhci_acpi_remove,
+ .remove = sdhci_acpi_remove,
};
module_platform_driver(sdhci_acpi_driver);
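The PM rework in this file drops the CONFIG_PM/CONFIG_PM_SLEEP ifdef blocks in favour of the unconditional SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() initializers plus pm_ptr(), which lets the compiler discard unused callbacks without __maybe_unused annotations. A minimal, self-contained sketch of that idiom for a hypothetical "foo" platform driver (not part of the patch):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)	{ return 0; }
static int foo_suspend(struct device *dev)		{ return 0; /* quiesce device */ }
static int foo_resume(struct device *dev)		{ return 0; /* restore device */ }
static int foo_runtime_suspend(struct device *dev)	{ return 0; /* gate clocks   */ }
static int foo_runtime_resume(struct device *dev)	{ return 0; /* ungate clocks */ }

static const struct dev_pm_ops foo_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		/* pm_ptr() evaluates to NULL when CONFIG_PM is disabled */
		.pm = pm_ptr(&foo_pm_ops),
	},
	.probe = foo_probe,
};
module_platform_driver(foo_driver);

MODULE_DESCRIPTION("pm_ptr() example");
MODULE_LICENSE("GPL");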
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index 4d4aac85cc7a..0f2a84f769b6 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2013 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2013 Broadcom Corporation
#include <linux/kernel.h>
#include <linux/module.h>
@@ -21,7 +11,6 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mmc/slot-gpio.h>
#include "sdhci-pltfm.h"
@@ -118,7 +107,7 @@ static void sdhci_bcm_kona_sd_init(struct sdhci_host *host)
* Software emulation of the SD card insertion/removal. Set insert=1 for insert
* and insert=0 for removal. The card detection is done by GPIO. For Broadcom
* IP to function properly the bit 0 of CORESTAT register needs to be set/reset
- * to generate the CD IRQ handled in sdhci.c which schedules card_tasklet.
+ * to generate the CD IRQ handled in sdhci.c
*/
static int sdhci_bcm_kona_sd_card_emulate(struct sdhci_host *host, int insert)
{
@@ -178,7 +167,7 @@ static void sdhci_bcm_kona_init_74_clocks(struct sdhci_host *host,
/*
* JEDEC and SD spec specify supplying 74 continuous clocks to
* device after power up. With minimum bus (100KHz) that
- * that translates to 740us
+ * translates to 740us
*/
if (power_mode != MMC_POWER_OFF)
udelay(740);
@@ -315,12 +304,20 @@ err_clk_disable:
clk_disable_unprepare(pltfm_priv->clk);
err_pltfm_free:
- sdhci_pltfm_free(pdev);
-
dev_err(dev, "Probing of sdhci-pltfm failed: %d\n", ret);
return ret;
}
+static void sdhci_bcm_kona_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct clk *clk = pltfm_host->clk;
+
+ sdhci_pltfm_remove(pdev);
+ clk_disable_unprepare(clk);
+}
+
static struct platform_driver sdhci_bcm_kona_driver = {
.driver = {
.name = "sdhci-kona",
@@ -329,7 +326,7 @@ static struct platform_driver sdhci_bcm_kona_driver = {
.of_match_table = sdhci_bcm_kona_of_match,
},
.probe = sdhci_bcm_kona_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove = sdhci_bcm_kona_remove,
};
module_platform_driver(sdhci_bcm_kona_driver);
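Both conversions above (sdhci-acpi and sdhci-bcm-kona) switch to the platform bus's void-returning remove callback: the driver can no longer report a removal failure, so it simply unwinds its resources in reverse probe order. A short sketch of the shape, with purely illustrative "bar" names:

#include <linux/clk.h>
#include <linux/platform_device.h>

struct bar_priv {
	struct clk *clk;
};

static void bar_remove(struct platform_device *pdev)
{
	struct bar_priv *priv = platform_get_drvdata(pdev);

	/* unwind in reverse probe order; nothing can be returned here */
	clk_disable_unprepare(priv->clk);
}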
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index f24623aac2db..c9442499876c 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -6,34 +6,206 @@
*/
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/delay.h>
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"
#define SDHCI_VENDOR 0x78
#define SDHCI_VENDOR_ENHANCED_STRB 0x1
+#define SDHCI_VENDOR_GATE_SDCLK_EN 0x2
-#define BRCMSTB_PRIV_FLAGS_NO_64BIT BIT(0)
-#define BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT BIT(1)
+#define BRCMSTB_MATCH_FLAGS_NO_64BIT BIT(0)
+#define BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT BIT(1)
+#define BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE BIT(2)
+#define BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY BIT(4)
+
+#define BRCMSTB_PRIV_FLAGS_HAS_CQE BIT(0)
+#define BRCMSTB_PRIV_FLAGS_GATE_CLOCK BIT(1)
#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
-struct sdhci_brcmstb_priv {
- void __iomem *cfg_regs;
- bool has_cqe;
+#define SDIO_CFG_CTRL 0x0
+#define SDIO_CFG_CTRL_SDCD_N_TEST_EN BIT(31)
+#define SDIO_CFG_CTRL_SDCD_N_TEST_LEV BIT(30)
+#define SDIO_CFG_OP_DLY 0x34
+#define SDIO_CFG_OP_DLY_DEFAULT 0x80000003
+#define SDIO_CFG_CQ_CAPABILITY 0x4c
+#define SDIO_CFG_CQ_CAPABILITY_FMUL GENMASK(13, 12)
+#define SDIO_CFG_SD_PIN_SEL 0x44
+#define SDIO_CFG_V1_SD_PIN_SEL 0x54
+#define SDIO_CFG_PHY_SW_MODE_0_RX_CTRL 0x7C
+#define SDIO_CFG_MAX_50MHZ_MODE 0x1ac
+#define SDIO_CFG_MAX_50MHZ_MODE_STRAP_OVERRIDE BIT(31)
+#define SDIO_CFG_MAX_50MHZ_MODE_ENABLE BIT(0)
+
+#define SDIO_BOOT_MAIN_CTL 0x0
+
+#define MMC_CAP_HSE_MASK (MMC_CAP2_HSX00_1_2V | MMC_CAP2_HSX00_1_8V)
+/* Select all SD UHS type I SDR speed above 50MB/s */
+#define MMC_CAP_UHS_I_SDR_MASK (MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104)
+
+enum cfg_core_ver {
+ SDIO_CFG_CORE_V1 = 1,
+ SDIO_CFG_CORE_V2,
+};
+
+struct sdhci_brcmstb_saved_regs {
+ u32 sd_pin_sel;
+ u32 phy_sw_mode0_rxctrl;
+ u32 max_50mhz_mode;
+ u32 boot_main_ctl;
};
struct brcmstb_match_priv {
+ void (*cfginit)(struct sdhci_host *host);
void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios);
+ void (*save_restore_regs)(struct mmc_host *mmc, int save);
struct sdhci_ops *ops;
+ const unsigned int flags;
+};
+
+struct sdhci_brcmstb_priv {
+ void __iomem *cfg_regs;
+ void __iomem *boot_regs;
+ struct sdhci_brcmstb_saved_regs saved_regs;
unsigned int flags;
+ struct clk *base_clk;
+ u32 base_freq_hz;
+ const struct brcmstb_match_priv *match_priv;
};
+static void sdhci_brcmstb_save_regs(struct mmc_host *mmc, enum cfg_core_ver ver)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ struct sdhci_brcmstb_saved_regs *sr = &priv->saved_regs;
+ void __iomem *cr = priv->cfg_regs;
+ bool is_emmc = mmc->caps & MMC_CAP_NONREMOVABLE;
+
+ if (is_emmc && priv->boot_regs)
+ sr->boot_main_ctl = readl(priv->boot_regs + SDIO_BOOT_MAIN_CTL);
+
+ if (ver == SDIO_CFG_CORE_V1) {
+ sr->sd_pin_sel = readl(cr + SDIO_CFG_V1_SD_PIN_SEL);
+ return;
+ }
+
+ sr->sd_pin_sel = readl(cr + SDIO_CFG_SD_PIN_SEL);
+ sr->phy_sw_mode0_rxctrl = readl(cr + SDIO_CFG_PHY_SW_MODE_0_RX_CTRL);
+ sr->max_50mhz_mode = readl(cr + SDIO_CFG_MAX_50MHZ_MODE);
+}
+
+static void sdhci_brcmstb_restore_regs(struct mmc_host *mmc, enum cfg_core_ver ver)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ struct sdhci_brcmstb_saved_regs *sr = &priv->saved_regs;
+ void __iomem *cr = priv->cfg_regs;
+ bool is_emmc = mmc->caps & MMC_CAP_NONREMOVABLE;
+
+ if (is_emmc && priv->boot_regs)
+ writel(sr->boot_main_ctl, priv->boot_regs + SDIO_BOOT_MAIN_CTL);
+
+ if (ver == SDIO_CFG_CORE_V1) {
+ writel(sr->sd_pin_sel, cr + SDIO_CFG_V1_SD_PIN_SEL);
+ return;
+ }
+
+ writel(sr->sd_pin_sel, cr + SDIO_CFG_SD_PIN_SEL);
+ writel(sr->phy_sw_mode0_rxctrl, cr + SDIO_CFG_PHY_SW_MODE_0_RX_CTRL);
+ writel(sr->max_50mhz_mode, cr + SDIO_CFG_MAX_50MHZ_MODE);
+}
+
+static void sdhci_brcmstb_save_restore_regs_v1(struct mmc_host *mmc, int save)
+{
+ if (save)
+ sdhci_brcmstb_save_regs(mmc, SDIO_CFG_CORE_V1);
+ else
+ sdhci_brcmstb_restore_regs(mmc, SDIO_CFG_CORE_V1);
+}
+
+static void sdhci_brcmstb_save_restore_regs_v2(struct mmc_host *mmc, int save)
+{
+ if (save)
+ sdhci_brcmstb_save_regs(mmc, SDIO_CFG_CORE_V2);
+ else
+ sdhci_brcmstb_restore_regs(mmc, SDIO_CFG_CORE_V2);
+}
+
+static inline void enable_clock_gating(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ if (!(priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK))
+ return;
+
+ reg = sdhci_readl(host, SDHCI_VENDOR);
+ reg |= SDHCI_VENDOR_GATE_SDCLK_EN;
+ sdhci_writel(host, reg, SDHCI_VENDOR);
+}
+
+static void brcmstb_reset(struct sdhci_host *host, u8 mask)
+{
+ sdhci_and_cqhci_reset(host, mask);
+
+ /* Reset will clear this, so re-enable it */
+ enable_clock_gating(host);
+}
+
+static void brcmstb_sdhci_reset_cmd_data(struct sdhci_host *host, u8 mask)
+{
+ u32 new_mask = (mask & (SDHCI_RESET_CMD | SDHCI_RESET_DATA)) << 24;
+ int ret;
+ u32 reg;
+
+ /*
+ * SDHCI_CLOCK_CONTROL register CARD_EN and CLOCK_INT_EN bits shall
+ * be set along with SOFTWARE_RESET register RESET_CMD or RESET_DATA
+ * bits, hence access SDHCI_CLOCK_CONTROL register as 32-bit register
+ */
+ new_mask |= SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN;
+ reg = sdhci_readl(host, SDHCI_CLOCK_CONTROL);
+ sdhci_writel(host, reg | new_mask, SDHCI_CLOCK_CONTROL);
+
+ reg = sdhci_readb(host, SDHCI_SOFTWARE_RESET);
+
+ ret = read_poll_timeout_atomic(sdhci_readb, reg, !(reg & mask),
+ 10, 10000, false,
+ host, SDHCI_SOFTWARE_RESET);
+
+ if (ret) {
+ pr_err("%s: Reset 0x%x never completed.\n",
+ mmc_hostname(host->mmc), (int)mask);
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
+ sdhci_dumpregs(host);
+ }
+}
+
+static void brcmstb_reset_74165b0(struct sdhci_host *host, u8 mask)
+{
+ /* take care of RESET_ALL as usual */
+ if (mask & SDHCI_RESET_ALL)
+ sdhci_and_cqhci_reset(host, SDHCI_RESET_ALL);
+
+ /* cmd and/or data treated differently on this core */
+ if (mask & (SDHCI_RESET_CMD | SDHCI_RESET_DATA))
+ brcmstb_sdhci_reset_cmd_data(host, mask);
+
+ /* Reset will clear this, so re-enable it */
+ enable_clock_gating(host);
+}
+
static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -94,6 +266,48 @@ static void sdhci_brcmstb_set_uhs_signaling(struct sdhci_host *host,
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
+static void sdhci_brcmstb_cfginit_2712(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *brcmstb_priv = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ /*
+ * If we support a speed that requires tuning,
+ * then select the delay line PHY as the clock source.
+ */
+ if ((host->mmc->caps & MMC_CAP_UHS_I_SDR_MASK) || (host->mmc->caps2 & MMC_CAP_HSE_MASK)) {
+ reg = readl(brcmstb_priv->cfg_regs + SDIO_CFG_MAX_50MHZ_MODE);
+ reg &= ~SDIO_CFG_MAX_50MHZ_MODE_ENABLE;
+ reg |= SDIO_CFG_MAX_50MHZ_MODE_STRAP_OVERRIDE;
+ writel(reg, brcmstb_priv->cfg_regs + SDIO_CFG_MAX_50MHZ_MODE);
+ }
+
+ if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
+ (host->mmc->caps & MMC_CAP_NEEDS_POLL)) {
+ /* Force presence */
+ reg = readl(brcmstb_priv->cfg_regs + SDIO_CFG_CTRL);
+ reg &= ~SDIO_CFG_CTRL_SDCD_N_TEST_LEV;
+ reg |= SDIO_CFG_CTRL_SDCD_N_TEST_EN;
+ writel(reg, brcmstb_priv->cfg_regs + SDIO_CFG_CTRL);
+ }
+}
+
+static void sdhci_brcmstb_set_72116_uhs_signaling(struct sdhci_host *host, unsigned int timing)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ /* no change to SDIO_CFG_OP_DLY_DEFAULT when using preset clk rate */
+ if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
+ return;
+
+ reg = (timing == MMC_TIMING_MMC_HS200) ? 0 : SDIO_CFG_OP_DLY_DEFAULT;
+ writel(reg, priv->cfg_regs + SDIO_CFG_OP_DLY);
+ sdhci_set_uhs_signaling(host, timing);
+}
+
static void sdhci_brcmstb_dumpregs(struct mmc_host *mmc)
{
sdhci_dumpregs(mmc_priv(mmc));
@@ -126,33 +340,85 @@ static struct sdhci_ops sdhci_brcmstb_ops = {
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
+static struct sdhci_ops sdhci_brcmstb_ops_2712 = {
+ .set_clock = sdhci_set_clock,
+ .set_power = sdhci_set_power_and_bus_voltage,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static struct sdhci_ops sdhci_brcmstb_ops_72116 = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_brcmstb_set_72116_uhs_signaling,
+};
+
static struct sdhci_ops sdhci_brcmstb_ops_7216 = {
.set_clock = sdhci_brcmstb_set_clock,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = brcmstb_reset,
+ .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling,
+};
+
+static struct sdhci_ops sdhci_brcmstb_ops_74165b0 = {
+ .set_clock = sdhci_brcmstb_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = brcmstb_reset_74165b0,
.set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling,
};
+static const struct brcmstb_match_priv match_priv_2712 = {
+ .cfginit = sdhci_brcmstb_cfginit_2712,
+ .ops = &sdhci_brcmstb_ops_2712,
+};
+
static struct brcmstb_match_priv match_priv_7425 = {
- .flags = BRCMSTB_PRIV_FLAGS_NO_64BIT |
- BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
+ .flags = BRCMSTB_MATCH_FLAGS_NO_64BIT |
+ BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT,
+ .ops = &sdhci_brcmstb_ops,
+};
+
+static struct brcmstb_match_priv match_priv_74371 = {
+ .flags = BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT,
.ops = &sdhci_brcmstb_ops,
};
static struct brcmstb_match_priv match_priv_7445 = {
- .flags = BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
+ .flags = BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT,
+ .save_restore_regs = sdhci_brcmstb_save_restore_regs_v1,
.ops = &sdhci_brcmstb_ops,
};
+static struct brcmstb_match_priv match_priv_72116 = {
+ .flags = BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT,
+ .save_restore_regs = sdhci_brcmstb_save_restore_regs_v1,
+ .ops = &sdhci_brcmstb_ops_72116,
+};
+
static const struct brcmstb_match_priv match_priv_7216 = {
+ .flags = BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE,
+ .save_restore_regs = sdhci_brcmstb_save_restore_regs_v2,
.hs400es = sdhci_brcmstb_hs400es,
.ops = &sdhci_brcmstb_ops_7216,
};
-static const struct of_device_id sdhci_brcm_of_match[] = {
+static struct brcmstb_match_priv match_priv_74165b0 = {
+ .flags = BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE,
+ .save_restore_regs = sdhci_brcmstb_save_restore_regs_v2,
+ .hs400es = sdhci_brcmstb_hs400es,
+ .ops = &sdhci_brcmstb_ops_74165b0,
+};
+
+static const struct of_device_id __maybe_unused sdhci_brcm_of_match[] = {
+ { .compatible = "brcm,bcm2712-sdhci", .data = &match_priv_2712 },
{ .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 },
+ { .compatible = "brcm,bcm74371-sdhci", .data = &match_priv_74371 },
{ .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 },
+ { .compatible = "brcm,bcm72116-sdhci", .data = &match_priv_72116 },
{ .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 },
+ { .compatible = "brcm,bcm74165b0-sdhci", .data = &match_priv_74165b0 },
{},
};
@@ -176,7 +442,7 @@ static int sdhci_brcmstb_add_host(struct sdhci_host *host,
bool dma64;
int ret;
- if (!priv->has_cqe)
+ if ((priv->flags & BRCMSTB_PRIV_FLAGS_HAS_CQE) == 0)
return sdhci_add_host(host);
dev_dbg(mmc_dev(host->mmc), "CQE is enabled\n");
@@ -223,10 +489,10 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
const struct of_device_id *match;
struct sdhci_brcmstb_priv *priv;
+ u32 actual_clock_mhz;
struct sdhci_host *host;
- struct resource *iomem;
- bool has_cqe = false;
struct clk *clk;
+ struct clk *base_clk = NULL;
int res;
match = of_match_node(sdhci_brcm_of_match, pdev->dev.of_node);
@@ -234,35 +500,28 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Probe found match for %s\n", match->compatible);
- clk = devm_clk_get_optional(&pdev->dev, NULL);
+ clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return dev_err_probe(&pdev->dev, PTR_ERR(clk),
- "Failed to get clock from Device Tree\n");
-
- res = clk_prepare_enable(clk);
- if (res)
- return res;
+ "Failed to get and enable clock from Device Tree\n");
memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata));
- if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
- has_cqe = true;
- match_priv->ops->irq = sdhci_brcmstb_cqhci_irq;
- }
brcmstb_pdata.ops = match_priv->ops;
host = sdhci_pltfm_init(pdev, &brcmstb_pdata,
sizeof(struct sdhci_brcmstb_priv));
- if (IS_ERR(host)) {
- res = PTR_ERR(host);
- goto err_clk;
- }
+ if (IS_ERR(host))
+ return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
priv = sdhci_pltfm_priv(pltfm_host);
- priv->has_cqe = has_cqe;
+ priv->match_priv = match->data;
+ if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
+ priv->flags |= BRCMSTB_PRIV_FLAGS_HAS_CQE;
+ match_priv->ops->irq = sdhci_brcmstb_cqhci_irq;
+ }
/* Map in the non-standard CFG registers */
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->cfg_regs = devm_ioremap_resource(&pdev->dev, iomem);
+ priv->cfg_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
if (IS_ERR(priv->cfg_regs)) {
res = PTR_ERR(priv->cfg_regs);
goto err;
@@ -273,6 +532,21 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
if (res)
goto err;
+ /* map non-standard BOOT registers if present */
+ if (host->mmc->caps & MMC_CAP_NONREMOVABLE) {
+ priv->boot_regs = devm_platform_get_and_ioremap_resource(pdev, 2, NULL);
+ if (IS_ERR(priv->boot_regs))
+ priv->boot_regs = NULL;
+ }
+
+ /*
+ * Automatic clock gating does not work for SD cards that may perform
+ * a voltage switch, so only enable it for non-removable devices.
+ */
+ if ((match_priv->flags & BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE) &&
+ (host->mmc->caps & MMC_CAP_NONREMOVABLE))
+ priv->flags |= BRCMSTB_PRIV_FLAGS_GATE_CLOCK;
+
/*
* If the chip has enhanced strobe and it's enabled, add
* callback
@@ -281,22 +555,55 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
(host->mmc->caps2 & MMC_CAP2_HS400_ES))
host->mmc_host_ops.hs400_enhanced_strobe = match_priv->hs400es;
+ if (match_priv->cfginit)
+ match_priv->cfginit(host);
+
/*
* Supply the existing CAPS, but clear the UHS modes. This
* will allow these modes to be specified by device tree
* properties through mmc_of_parse().
*/
- host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
- if (match_priv->flags & BRCMSTB_PRIV_FLAGS_NO_64BIT)
+ sdhci_read_caps(host);
+ if (match_priv->flags & BRCMSTB_MATCH_FLAGS_NO_64BIT)
host->caps &= ~SDHCI_CAN_64BIT;
- host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
SDHCI_SUPPORT_DDR50);
- host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
- if (match_priv->flags & BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT)
+ if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT)
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ if (!(match_priv->flags & BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY))
+ host->mmc_host_ops.card_busy = NULL;
+
+ /* Change the base clock frequency if the DT property exists */
+ if (device_property_read_u32(&pdev->dev, "clock-frequency",
+ &priv->base_freq_hz) != 0)
+ goto add_host;
+
+ base_clk = devm_clk_get_optional(&pdev->dev, "sdio_freq");
+ if (IS_ERR(base_clk)) {
+ dev_warn(&pdev->dev, "Clock for \"sdio_freq\" not found\n");
+ goto add_host;
+ }
+
+ res = clk_prepare_enable(base_clk);
+ if (res)
+ goto err;
+
+ /* set improved clock rate */
+ clk_set_rate(base_clk, priv->base_freq_hz);
+ actual_clock_mhz = clk_get_rate(base_clk) / 1000000;
+
+ host->caps &= ~SDHCI_CLOCK_V3_BASE_MASK;
+ host->caps |= (actual_clock_mhz << SDHCI_CLOCK_BASE_SHIFT);
+ /* Disable presets because they are now incorrect */
+ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+
+ dev_dbg(&pdev->dev, "Base Clock Frequency changed to %dMHz\n",
+ actual_clock_mhz);
+ priv->base_clk = base_clk;
+
+add_host:
res = sdhci_brcmstb_add_host(host, priv);
if (res)
goto err;
@@ -305,9 +612,7 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
return res;
err:
- sdhci_pltfm_free(pdev);
-err_clk:
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(base_clk);
return res;
}
@@ -318,15 +623,70 @@ static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
+static int sdhci_brcmstb_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ const struct brcmstb_match_priv *match_priv = priv->match_priv;
+
+ int ret;
+
+ if (match_priv->save_restore_regs)
+ match_priv->save_restore_regs(host->mmc, 1);
+
+ clk_disable_unprepare(priv->base_clk);
+ if (host->mmc->caps2 & MMC_CAP2_CQE) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
+
+ return sdhci_pltfm_suspend(dev);
+}
+
+static int sdhci_brcmstb_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ const struct brcmstb_match_priv *match_priv = priv->match_priv;
+ int ret;
+
+ ret = sdhci_pltfm_resume(dev);
+ if (!ret && priv->base_freq_hz) {
+ ret = clk_prepare_enable(priv->base_clk);
+ /*
+ * Note: using clk_get_rate() below as clk_get_rate()
+ * honors CLK_GET_RATE_NOCACHE attribute, but clk_set_rate()
+ * may do implicit get_rate() calls that do not honor
+ * CLK_GET_RATE_NOCACHE.
+ */
+ if (!ret &&
+ (clk_get_rate(priv->base_clk) != priv->base_freq_hz))
+ ret = clk_set_rate(priv->base_clk, priv->base_freq_hz);
+ }
+
+ if (match_priv->save_restore_regs)
+ match_priv->save_restore_regs(host->mmc, 0);
+
+ if (host->mmc->caps2 & MMC_CAP2_CQE)
+ ret = cqhci_resume(host->mmc);
+
+ return ret;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(sdhci_brcmstb_pmops, sdhci_brcmstb_suspend, sdhci_brcmstb_resume);
+
static struct platform_driver sdhci_brcmstb_driver = {
.driver = {
.name = "sdhci-brcmstb",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sdhci_pltfm_pmops,
+ .pm = pm_sleep_ptr(&sdhci_brcmstb_pmops),
.of_match_table = of_match_ptr(sdhci_brcm_of_match),
},
.probe = sdhci_brcmstb_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove = sdhci_pltfm_remove,
.shutdown = sdhci_brcmstb_shutdown,
};
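The probe cleanup above leans on devm_clk_get_optional_enabled(), which folds the optional lookup, clk_prepare_enable() and automatic disable/unprepare on driver detach into a single call, so the error paths no longer need explicit clock teardown. A minimal sketch of the idiom, using an invented "baz" probe:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/platform_device.h>

static int baz_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* NULL if no clock is described; disabled and put automatically on unbind */
	clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "Failed to get and enable clock\n");

	/* ... remaining probe steps; no clk_disable_unprepare() on error ... */
	return 0;
}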
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 6f2de54a5987..435603c8c00b 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -11,7 +11,8 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
#include "sdhci-pltfm.h"
@@ -35,6 +36,24 @@
#define SDHCI_CDNS_HRS06_MODE_MMC_HS400 0x5
#define SDHCI_CDNS_HRS06_MODE_MMC_HS400ES 0x6
+/* Read block gap */
+#define SDHCI_CDNS_HRS37 0x94 /* interface mode select */
+#define SDHCI_CDNS_HRS37_MODE_DS 0x0
+#define SDHCI_CDNS_HRS37_MODE_HS 0x1
+#define SDHCI_CDNS_HRS37_MODE_UDS_SDR12 0x8
+#define SDHCI_CDNS_HRS37_MODE_UDS_SDR25 0x9
+#define SDHCI_CDNS_HRS37_MODE_UDS_SDR50 0xa
+#define SDHCI_CDNS_HRS37_MODE_UDS_SDR104 0xb
+#define SDHCI_CDNS_HRS37_MODE_UDS_DDR50 0xc
+#define SDHCI_CDNS_HRS37_MODE_MMC_LEGACY 0x20
+#define SDHCI_CDNS_HRS37_MODE_MMC_SDR 0x21
+#define SDHCI_CDNS_HRS37_MODE_MMC_DDR 0x22
+#define SDHCI_CDNS_HRS37_MODE_MMC_HS200 0x23
+#define SDHCI_CDNS_HRS37_MODE_MMC_HS400 0x24
+#define SDHCI_CDNS_HRS37_MODE_MMC_HS400ES 0x25
+#define SDHCI_CDNS_HRS38 0x98 /* Read block gap coefficient */
+#define SDHCI_CDNS_HRS38_BLKGAP_MAX 0xf
+
/* SRS - Slot Register Set (SDHCI-compatible) */
#define SDHCI_CDNS_SRS_BASE 0x200
@@ -66,7 +85,11 @@ struct sdhci_cdns_phy_param {
struct sdhci_cdns_priv {
void __iomem *hrs_addr;
+ void __iomem *ctl_addr; /* write control */
+ spinlock_t wrlock; /* write lock */
bool enhanced_strobe;
+ void (*priv_writel)(struct sdhci_cdns_priv *priv, u32 val, void __iomem *reg);
+ struct reset_control *rst_hw;
unsigned int nr_phy_params;
struct sdhci_cdns_phy_param phy_params[];
};
@@ -76,6 +99,11 @@ struct sdhci_cdns_phy_cfg {
u8 addr;
};
+struct sdhci_cdns_drv_data {
+ int (*init)(struct platform_device *pdev);
+ const struct sdhci_pltfm_data pltfm_data;
+};
+
static const struct sdhci_cdns_phy_cfg sdhci_cdns_phy_cfgs[] = {
{ "cdns,phy-input-delay-sd-highspeed", SDHCI_CDNS_PHY_DLY_SD_HS, },
{ "cdns,phy-input-delay-legacy", SDHCI_CDNS_PHY_DLY_SD_DEFAULT, },
@@ -90,6 +118,12 @@ static const struct sdhci_cdns_phy_cfg sdhci_cdns_phy_cfgs[] = {
{ "cdns,phy-dll-delay-strobe", SDHCI_CDNS_PHY_DLY_STROBE, },
};
+static inline void cdns_writel(struct sdhci_cdns_priv *priv, u32 val,
+ void __iomem *reg)
+{
+ writel(val, reg);
+}
+
static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
u8 addr, u8 data)
{
@@ -104,17 +138,17 @@ static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
tmp = FIELD_PREP(SDHCI_CDNS_HRS04_WDATA, data) |
FIELD_PREP(SDHCI_CDNS_HRS04_ADDR, addr);
- writel(tmp, reg);
+ priv->priv_writel(priv, tmp, reg);
tmp |= SDHCI_CDNS_HRS04_WR;
- writel(tmp, reg);
+ priv->priv_writel(priv, tmp, reg);
ret = readl_poll_timeout(reg, tmp, tmp & SDHCI_CDNS_HRS04_ACK, 0, 10);
if (ret)
return ret;
tmp &= ~SDHCI_CDNS_HRS04_WR;
- writel(tmp, reg);
+ priv->priv_writel(priv, tmp, reg);
ret = readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_HRS04_ACK),
0, 10);
@@ -128,7 +162,7 @@ static unsigned int sdhci_cdns_phy_param_count(struct device_node *np)
int i;
for (i = 0; i < ARRAY_SIZE(sdhci_cdns_phy_cfgs); i++)
- if (of_property_read_bool(np, sdhci_cdns_phy_cfgs[i].property))
+ if (of_property_present(np, sdhci_cdns_phy_cfgs[i].property))
count++;
return count;
@@ -191,7 +225,7 @@ static void sdhci_cdns_set_emmc_mode(struct sdhci_cdns_priv *priv, u32 mode)
tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
tmp &= ~SDHCI_CDNS_HRS06_MODE;
tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_MODE, mode);
- writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS06);
+ priv->priv_writel(priv, tmp, priv->hrs_addr + SDHCI_CDNS_HRS06);
}
static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv)
@@ -223,7 +257,7 @@ static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
*/
for (i = 0; i < 2; i++) {
tmp |= SDHCI_CDNS_HRS06_TUNE_UP;
- writel(tmp, reg);
+ priv->priv_writel(priv, tmp, reg);
ret = readl_poll_timeout(reg, tmp,
!(tmp & SDHCI_CDNS_HRS06_TUNE_UP),
@@ -235,6 +269,43 @@ static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
return 0;
}
+/**
+ * sdhci_cdns_tune_blkgap() - tune multi-block read gap
+ * @mmc: MMC host
+ *
+ * Tune the read block gap delay used in multi-block reads. To do so,
+ * send a multi-block read command with an incrementing gap value until
+ * it succeeds.
+ *
+ * Return: error code
+ */
+static int sdhci_cdns_tune_blkgap(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_cdns_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ void __iomem *hrs37_reg = priv->hrs_addr + SDHCI_CDNS_HRS37;
+ void __iomem *hrs38_reg = priv->hrs_addr + SDHCI_CDNS_HRS38;
+ int ret;
+ u32 gap;
+
+ /* Currently only needed in HS200 mode */
+ if (host->timing != MMC_TIMING_MMC_HS200)
+ return 0;
+
+ writel(SDHCI_CDNS_HRS37_MODE_MMC_HS200, hrs37_reg);
+
+ for (gap = 0; gap <= SDHCI_CDNS_HRS38_BLKGAP_MAX; gap++) {
+ writel(gap, hrs38_reg);
+ ret = mmc_read_tuning(mmc, 512, 32);
+ if (!ret)
+ break;
+ }
+
+ dev_dbg(mmc_dev(mmc), "read block gap tune %s, gap %d\n", ret ? "failed" : "OK", gap);
+ return ret;
+}
+
/*
* In SD mode, software must not use the hardware tuning and instead perform
* an almost identical procedure to eMMC.
@@ -245,6 +316,7 @@ static int sdhci_cdns_execute_tuning(struct sdhci_host *host, u32 opcode)
int max_streak = 0;
int end_of_streak = 0;
int i;
+ int ret;
/*
* Do not execute tuning for UHS_SDR50 or UHS_DDR50.
@@ -272,7 +344,11 @@ static int sdhci_cdns_execute_tuning(struct sdhci_host *host, u32 opcode)
return -EIO;
}
- return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2);
+ ret = sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2);
+ if (ret)
+ return ret;
+
+ return sdhci_cdns_tune_blkgap(host->mmc);
}
static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
@@ -309,6 +385,91 @@ static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
sdhci_set_uhs_signaling(host, timing);
}
+/* Elba control register bits [6:3] are byte-lane enables */
+#define ELBA_BYTE_ENABLE_MASK(x) ((x) << 3)
+
+/*
+ * The Pensando Elba SoC explicitly controls byte-lane enabling on writes
+ * which includes writes to the HRS registers. The write lock (wrlock)
+ * is used to ensure byte-lane enable, using write control (ctl_addr),
+ * occurs before the data write.
+ */
+static void elba_priv_writel(struct sdhci_cdns_priv *priv, u32 val,
+ void __iomem *reg)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->wrlock, flags);
+ writel(GENMASK(7, 3), priv->ctl_addr);
+ writel(val, reg);
+ spin_unlock_irqrestore(&priv->wrlock, flags);
+}
+
+static void elba_write_l(struct sdhci_host *host, u32 val, int reg)
+{
+ elba_priv_writel(sdhci_cdns_priv(host), val, host->ioaddr + reg);
+}
+
+static void elba_write_w(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+ u32 shift = reg & GENMASK(1, 0);
+ unsigned long flags;
+ u32 byte_enables;
+
+ byte_enables = GENMASK(1, 0) << shift;
+ spin_lock_irqsave(&priv->wrlock, flags);
+ writel(ELBA_BYTE_ENABLE_MASK(byte_enables), priv->ctl_addr);
+ writew(val, host->ioaddr + reg);
+ spin_unlock_irqrestore(&priv->wrlock, flags);
+}
+
+static void elba_write_b(struct sdhci_host *host, u8 val, int reg)
+{
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+ u32 shift = reg & GENMASK(1, 0);
+ unsigned long flags;
+ u32 byte_enables;
+
+ byte_enables = BIT(0) << shift;
+ spin_lock_irqsave(&priv->wrlock, flags);
+ writel(ELBA_BYTE_ENABLE_MASK(byte_enables), priv->ctl_addr);
+ writeb(val, host->ioaddr + reg);
+ spin_unlock_irqrestore(&priv->wrlock, flags);
+}
+
+static const struct sdhci_ops sdhci_elba_ops = {
+ .write_l = elba_write_l,
+ .write_w = elba_write_w,
+ .write_b = elba_write_b,
+ .set_clock = sdhci_set_clock,
+ .get_timeout_clock = sdhci_cdns_get_timeout_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
+};
+
+static int elba_drv_init(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+ void __iomem *ioaddr;
+
+ host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA;
+ spin_lock_init(&priv->wrlock);
+
+ /* Byte-lane control register */
+ ioaddr = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(ioaddr))
+ return PTR_ERR(ioaddr);
+
+ priv->ctl_addr = ioaddr;
+ priv->priv_writel = elba_priv_writel;
+ writel(ELBA_BYTE_ENABLE_MASK(0xf), priv->ctl_addr);
+
+ return 0;
+}
+
static const struct sdhci_ops sdhci_cdns_ops = {
.set_clock = sdhci_set_clock,
.get_timeout_clock = sdhci_cdns_get_timeout_clock,
@@ -318,13 +479,31 @@ static const struct sdhci_ops sdhci_cdns_ops = {
.set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
};
-static const struct sdhci_pltfm_data sdhci_cdns_uniphier_pltfm_data = {
- .ops = &sdhci_cdns_ops,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+static const struct sdhci_cdns_drv_data sdhci_cdns_uniphier_drv_data = {
+ .pltfm_data = {
+ .ops = &sdhci_cdns_ops,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ },
+};
+
+static const struct sdhci_cdns_drv_data sdhci_elba_drv_data = {
+ .init = elba_drv_init,
+ .pltfm_data = {
+ .ops = &sdhci_elba_ops,
+ },
+};
+
+static const struct sdhci_cdns_drv_data sdhci_eyeq_drv_data = {
+ .pltfm_data = {
+ .ops = &sdhci_cdns_ops,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ },
};
-static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = {
- .ops = &sdhci_cdns_ops,
+static const struct sdhci_cdns_drv_data sdhci_cdns_drv_data = {
+ .pltfm_data = {
+ .ops = &sdhci_cdns_ops,
+ },
};
static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc,
@@ -347,10 +526,26 @@ static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc,
SDHCI_CDNS_HRS06_MODE_MMC_HS400);
}
+static void sdhci_cdns_mmc_hw_reset(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+
+ dev_dbg(mmc_dev(host->mmc), "emmc hardware reset\n");
+
+ reset_control_assert(priv->rst_hw);
+ /* For eMMC, minimum is 1us but give it 3us for good measure */
+ udelay(3);
+
+ reset_control_deassert(priv->rst_hw);
+ /* For eMMC, minimum is 200us but give it 300us for good measure */
+ usleep_range(300, 1000);
+}
+
static int sdhci_cdns_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
- const struct sdhci_pltfm_data *data;
+ const struct sdhci_cdns_drv_data *data;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_cdns_priv *priv;
struct clk *clk;
@@ -359,25 +554,19 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
static const u16 version = SDHCI_SPEC_400 << SDHCI_SPEC_VER_SHIFT;
- clk = devm_clk_get(dev, NULL);
+ clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
data = of_device_get_match_data(dev);
if (!data)
- data = &sdhci_cdns_pltfm_data;
+ data = &sdhci_cdns_drv_data;
nr_phy_params = sdhci_cdns_phy_param_count(dev->of_node);
- host = sdhci_pltfm_init(pdev, data,
+ host = sdhci_pltfm_init(pdev, &data->pltfm_data,
struct_size(priv, phy_params, nr_phy_params));
- if (IS_ERR(host)) {
- ret = PTR_ERR(host);
- goto disable_clk;
- }
+ if (IS_ERR(host))
+ return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
pltfm_host->clk = clk;
@@ -386,9 +575,15 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
priv->nr_phy_params = nr_phy_params;
priv->hrs_addr = host->ioaddr;
priv->enhanced_strobe = false;
+ priv->priv_writel = cdns_writel;
host->ioaddr += SDHCI_CDNS_SRS_BASE;
host->mmc_host_ops.hs400_enhanced_strobe =
sdhci_cdns_hs400_enhanced_strobe;
+ if (data->init) {
+ ret = data->init(pdev);
+ if (ret)
+ return ret;
+ }
sdhci_enable_v4_mode(host);
__sdhci_read_caps(host, &version, NULL, NULL);
@@ -396,28 +591,26 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
ret = mmc_of_parse(host->mmc);
if (ret)
- goto free;
+ return ret;
sdhci_cdns_phy_param_parse(dev->of_node, priv);
ret = sdhci_cdns_phy_init(priv);
if (ret)
- goto free;
-
- ret = sdhci_add_host(host);
- if (ret)
- goto free;
+ return ret;
- return 0;
-free:
- sdhci_pltfm_free(pdev);
-disable_clk:
- clk_disable_unprepare(clk);
+ if (host->mmc->caps & MMC_CAP_HW_RESET) {
+ priv->rst_hw = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(priv->rst_hw))
+ return dev_err_probe(mmc_dev(host->mmc), PTR_ERR(priv->rst_hw),
+ "reset controller error\n");
+ if (priv->rst_hw)
+ host->mmc_host_ops.card_hw_reset = sdhci_cdns_mmc_hw_reset;
+ }
- return ret;
+ return sdhci_add_host(host);
}
-#ifdef CONFIG_PM_SLEEP
static int sdhci_cdns_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -444,16 +637,21 @@ disable_clk:
return ret;
}
-#endif
-static const struct dev_pm_ops sdhci_cdns_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_cdns_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(sdhci_cdns_pm_ops, sdhci_pltfm_suspend, sdhci_cdns_resume);
static const struct of_device_id sdhci_cdns_match[] = {
{
.compatible = "socionext,uniphier-sd4hc",
- .data = &sdhci_cdns_uniphier_pltfm_data,
+ .data = &sdhci_cdns_uniphier_drv_data,
+ },
+ {
+ .compatible = "amd,pensando-elba-sd4hc",
+ .data = &sdhci_elba_drv_data,
+ },
+ {
+ .compatible = "mobileye,eyeq-sd4hc",
+ .data = &sdhci_eyeq_drv_data,
},
{ .compatible = "cdns,sd4hc" },
{ /* sentinel */ }
@@ -464,11 +662,11 @@ static struct platform_driver sdhci_cdns_driver = {
.driver = {
.name = "sdhci-cdns",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sdhci_cdns_pm_ops,
+ .pm = pm_sleep_ptr(&sdhci_cdns_pm_ops),
.of_match_table = sdhci_cdns_match,
},
.probe = sdhci_cdns_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_cdns_driver);
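The Cadence refactor wraps the former sdhci_pltfm_data match entries in a driver-data structure that can also carry an optional per-SoC init() hook, resolved through of_device_get_match_data(). A condensed sketch of that dispatch pattern, using invented "qux" names rather than the real Cadence types:

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct qux_drv_data {
	int (*init)(struct platform_device *pdev);	/* optional SoC-specific setup */
};

static int qux_soc_init(struct platform_device *pdev)
{
	/* e.g. map an extra register window or set extra host caps */
	return 0;
}

static const struct qux_drv_data qux_generic_data = { };
static const struct qux_drv_data qux_soc_data = { .init = qux_soc_init };

static int qux_probe(struct platform_device *pdev)
{
	const struct qux_drv_data *data = of_device_get_match_data(&pdev->dev);

	if (!data)
		data = &qux_generic_data;	/* entries without .data fall back here */

	if (data->init) {
		int ret = data->init(pdev);

		if (ret)
			return ret;
	}
	return 0;
}

static const struct of_device_id qux_of_match[] = {
	{ .compatible = "vendor,qux-generic", .data = &qux_generic_data },
	{ .compatible = "vendor,qux-soc",     .data = &qux_soc_data },
	{ .compatible = "vendor,qux" },		/* no .data: handled by the fallback */
	{ /* sentinel */ }
};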
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
deleted file mode 100644
index 2a29c7a4f308..000000000000
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ /dev/null
@@ -1,113 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * SDHCI support for CNS3xxx SoC
- *
- * Copyright 2008 Cavium Networks
- * Copyright 2010 MontaVista Software, LLC.
- *
- * Authors: Scott Shu
- * Anton Vorontsov <avorontsov@mvista.com>
- */
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/mmc/host.h>
-#include <linux/module.h>
-#include "sdhci-pltfm.h"
-
-static unsigned int sdhci_cns3xxx_get_max_clk(struct sdhci_host *host)
-{
- return 150000000;
-}
-
-static void sdhci_cns3xxx_set_clock(struct sdhci_host *host, unsigned int clock)
-{
- struct device *dev = mmc_dev(host->mmc);
- int div = 1;
- u16 clk;
- unsigned long timeout;
-
- host->mmc->actual_clock = 0;
-
- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
-
- if (clock == 0)
- return;
-
- while (host->max_clk / div > clock) {
- /*
- * On CNS3xxx divider grows linearly up to 4, and then
- * exponentially up to 256.
- */
- if (div < 4)
- div += 1;
- else if (div < 256)
- div *= 2;
- else
- break;
- }
-
- dev_dbg(dev, "desired SD clock: %d, actual: %d\n",
- clock, host->max_clk / div);
-
- /* Divide by 3 is special. */
- if (div != 3)
- div >>= 1;
-
- clk = div << SDHCI_DIVIDER_SHIFT;
- clk |= SDHCI_CLOCK_INT_EN;
- sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
-
- timeout = 20;
- while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
- & SDHCI_CLOCK_INT_STABLE)) {
- if (timeout == 0) {
- dev_warn(dev, "clock is unstable");
- break;
- }
- timeout--;
- mdelay(1);
- }
-
- clk |= SDHCI_CLOCK_CARD_EN;
- sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
-}
-
-static const struct sdhci_ops sdhci_cns3xxx_ops = {
- .get_max_clock = sdhci_cns3xxx_get_max_clk,
- .set_clock = sdhci_cns3xxx_set_clock,
- .set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
- .set_uhs_signaling = sdhci_set_uhs_signaling,
-};
-
-static const struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
- .ops = &sdhci_cns3xxx_ops,
- .quirks = SDHCI_QUIRK_BROKEN_DMA |
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
- SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
- SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
- SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
-};
-
-static int sdhci_cns3xxx_probe(struct platform_device *pdev)
-{
- return sdhci_pltfm_register(pdev, &sdhci_cns3xxx_pdata, 0);
-}
-
-static struct platform_driver sdhci_cns3xxx_driver = {
- .driver = {
- .name = "sdhci-cns3xxx",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sdhci_pltfm_pmops,
- },
- .probe = sdhci_cns3xxx_probe,
- .remove = sdhci_pltfm_unregister,
-};
-
-module_platform_driver(sdhci_cns3xxx_driver);
-
-MODULE_DESCRIPTION("SDHCI driver for CNS3xxx");
-MODULE_AUTHOR("Scott Shu, "
- "Anton Vorontsov <avorontsov@mvista.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-cqhci.h b/drivers/mmc/host/sdhci-cqhci.h
new file mode 100644
index 000000000000..cf8e7ba71bbd
--- /dev/null
+++ b/drivers/mmc/host/sdhci-cqhci.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2022 The Chromium OS Authors
+ *
+ * Support that applies to the combination of SDHCI and CQHCI, while not
+ * expressing a dependency between the two modules.
+ */
+
+#ifndef __MMC_HOST_SDHCI_CQHCI_H__
+#define __MMC_HOST_SDHCI_CQHCI_H__
+
+#include "cqhci.h"
+#include "sdhci.h"
+
+static inline void sdhci_and_cqhci_reset(struct sdhci_host *host, u8 mask)
+{
+ if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) &&
+ host->mmc->cqe_private)
+ cqhci_deactivate(host->mmc);
+
+ sdhci_reset(host, mask);
+}
+
+#endif /* __MMC_HOST_SDHCI_CQHCI_H__ */
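The brcmstb and esdhc-imx hunks in this series include the new header so that a full controller reset also deactivates a halted command queue. A stripped-down sketch of how a host driver would wire the helper into its .reset op (the "myhost" names are illustrative, not from the patch):

#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"

static void myhost_reset(struct sdhci_host *host, u8 mask)
{
	/* deactivates CQHCI on SDHCI_RESET_ALL, then performs the normal SDHCI reset */
	sdhci_and_cqhci_reset(host, mask);

	/* re-apply any vendor register state the reset may have cleared here */
}

static const struct sdhci_ops myhost_ops = {
	.set_clock         = sdhci_set_clock,
	.set_bus_width     = sdhci_set_bus_width,
	.reset             = myhost_reset,
	.set_uhs_signaling = sdhci_set_uhs_signaling,
};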
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index 5e5bf82e5976..dbfaee4a5ada 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -75,25 +75,13 @@ static int sdhci_dove_probe(struct platform_device *pdev)
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
- pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
-
- if (!IS_ERR(pltfm_host->clk))
- clk_prepare_enable(pltfm_host->clk);
+ pltfm_host->clk = devm_clk_get_enabled(&pdev->dev, NULL);
ret = mmc_of_parse(host->mmc);
if (ret)
- goto err_sdhci_add;
-
- ret = sdhci_add_host(host);
- if (ret)
- goto err_sdhci_add;
+ return ret;
- return 0;
-
-err_sdhci_add:
- clk_disable_unprepare(pltfm_host->clk);
- sdhci_pltfm_free(pdev);
- return ret;
+ return sdhci_add_host(host);
}
static const struct of_device_id sdhci_dove_of_match_table[] = {
@@ -110,7 +98,7 @@ static struct platform_driver sdhci_dove_driver = {
.of_match_table = sdhci_dove_of_match_table,
},
.probe = sdhci_dove_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_dove_driver);
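Several conversions in this series (the ACPI probe and the Elba init hook above) replace the platform_get_resource()/devm_ioremap() pair with devm_platform_ioremap_resource(), which requests and maps the MEM resource in one devres-managed step. A tiny sketch under an invented "mydev" probe:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int mydev_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* get, request and ioremap MEM resource 0; unmapped automatically on detach */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* register accesses go through 'base' from here on */
	return 0;
}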
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 72c0bf0c1887..a7a5df673b0f 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -22,15 +22,18 @@
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/platform_data/mmc-esdhc-imx.h>
#include <linux/pm_runtime.h>
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
#include "cqhci.h"
-#define ESDHC_SYS_CTRL_DTOCV_MASK 0x0f
+#define ESDHC_SYS_CTRL_DTOCV_MASK GENMASK(19, 16)
+#define ESDHC_SYS_CTRL_RST_FIFO BIT(22)
+#define ESDHC_SYS_CTRL_IPP_RST_N BIT(23)
+#define ESDHC_SYS_CTRL_RESET_TUNING BIT(28)
#define ESDHC_CTRL_D3CD 0x08
#define ESDHC_BURST_LEN_EN_INCR (1 << 27)
/* VENDOR SPEC register */
@@ -80,7 +83,11 @@
#define ESDHC_TUNE_CTRL_STEP 1
#define ESDHC_TUNE_CTRL_MIN 0
#define ESDHC_TUNE_CTRL_MAX ((1 << 7) - 1)
-
+#define ESDHC_TUNE_CTRL_STATUS_TAP_SEL_MASK GENMASK(30, 16)
+#define ESDHC_TUNE_CTRL_STATUS_TAP_SEL_PRE_MASK GENMASK(30, 24)
+#define ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_PRE_MASK GENMASK(14, 8)
+#define ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_OUT_MASK GENMASK(7, 4)
+#define ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_POST_MASK GENMASK(3, 0)
/* strobe dll register */
#define ESDHC_STROBE_DLL_CTRL 0x70
#define ESDHC_STROBE_DLL_CTRL_ENABLE (1 << 0)
@@ -95,13 +102,20 @@
#define ESDHC_VEND_SPEC2 0xc8
#define ESDHC_VEND_SPEC2_EN_BUSY_IRQ (1 << 8)
+#define ESDHC_VEND_SPEC2_AUTO_TUNE_8BIT_EN (1 << 4)
+#define ESDHC_VEND_SPEC2_AUTO_TUNE_4BIT_EN (0 << 4)
+#define ESDHC_VEND_SPEC2_AUTO_TUNE_1BIT_EN (2 << 4)
+#define ESDHC_VEND_SPEC2_AUTO_TUNE_CMD_EN (1 << 6)
+#define ESDHC_VEND_SPEC2_AUTO_TUNE_MODE_MASK (7 << 4)
#define ESDHC_TUNING_CTRL 0xcc
#define ESDHC_STD_TUNING_EN (1 << 24)
+#define ESDHC_TUNING_WINDOW_MASK GENMASK(22, 20)
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
#define ESDHC_TUNING_START_TAP_DEFAULT 0x1
#define ESDHC_TUNING_START_TAP_MASK 0x7f
#define ESDHC_TUNING_CMD_CRC_CHECK_DISABLE (1 << 7)
+#define ESDHC_TUNING_STEP_DEFAULT 0x1
#define ESDHC_TUNING_STEP_MASK 0x00070000
#define ESDHC_TUNING_STEP_SHIFT 16
@@ -115,6 +129,7 @@
#define ESDHC_CTRL_4BITBUS (0x1 << 1)
#define ESDHC_CTRL_8BITBUS (0x2 << 1)
#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1)
+#define USDHC_GET_BUSWIDTH(c) (c & ESDHC_CTRL_BUSWIDTH_MASK)
/*
* There is an INT DMA ERR mismatch between eSDHC and STD SDHC SPEC:
@@ -164,8 +179,8 @@
#define ESDHC_FLAG_HS400 BIT(9)
/*
* The IP has errata ERR010450
- * uSDHC: Due to the I/O timing limit, for SDR mode, SD card clock can't
- * exceed 150MHz, for DDR mode, SD card clock can't exceed 45MHz.
+ * uSDHC: at 1.8V, due to the I/O timing limit, the SD card clock can't
+ * exceed 150MHz in SDR mode or 45MHz in DDR mode.
*/
#define ESDHC_FLAG_ERR010450 BIT(10)
/* The IP supports HS400ES mode */
@@ -191,8 +206,54 @@
*/
#define ESDHC_FLAG_BROKEN_AUTO_CMD23 BIT(16)
+/* ERR004536 is not applicable for the IP */
+#define ESDHC_FLAG_SKIP_ERR004536 BIT(17)
+
+/* The IP does not have GPIO CD wake capabilities */
+#define ESDHC_FLAG_SKIP_CD_WAKE BIT(18)
+
+/* the controller has dummy pad for clock loopback */
+#define ESDHC_FLAG_DUMMY_PAD BIT(19)
+
+#define ESDHC_AUTO_TUNING_WINDOW 3
+
+enum wp_types {
+ ESDHC_WP_NONE, /* no WP, neither controller nor gpio */
+ ESDHC_WP_CONTROLLER, /* mmc controller internal WP */
+ ESDHC_WP_GPIO, /* external gpio pin for WP */
+};
+
+enum cd_types {
+ ESDHC_CD_NONE, /* no CD, neither controller nor gpio */
+ ESDHC_CD_CONTROLLER, /* mmc controller internal CD */
+ ESDHC_CD_GPIO, /* external gpio pin for CD */
+ ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */
+};
+
+/*
+ * struct esdhc_platform_data - platform data for esdhc on i.MX
+ *
+ * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35.
+ *
+ * @wp_type: type of write_protect method (see wp_types enum above)
+ * @cd_type: type of card_detect method (see cd_types enum above)
+ */
+
+struct esdhc_platform_data {
+ enum wp_types wp_type;
+ enum cd_types cd_type;
+ int max_bus_width;
+ unsigned int delay_line;
+ unsigned int tuning_step; /* The delay cell steps in tuning procedure */
+ unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */
+ unsigned int strobe_dll_delay_target; /* The delay cell for strobe pad (read clock) */
+ unsigned int saved_tuning_delay_cell; /* save the value of tuning delay cell */
+ unsigned int saved_auto_tuning_window; /* save the auto tuning window width */
+};
+
struct esdhc_soc_data {
u32 flags;
+ u32 quirks;
};
static const struct esdhc_soc_data esdhc_imx25_data = {
@@ -217,63 +278,86 @@ static const struct esdhc_soc_data usdhc_imx6q_data = {
};
static const struct esdhc_soc_data usdhc_imx6sl_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_ERR004536
| ESDHC_FLAG_HS200
| ESDHC_FLAG_BROKEN_AUTO_CMD23,
};
static const struct esdhc_soc_data usdhc_imx6sll_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};
static const struct esdhc_soc_data usdhc_imx6sx_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_STATE_LOST_IN_LPMODE
| ESDHC_FLAG_BROKEN_AUTO_CMD23,
};
static const struct esdhc_soc_data usdhc_imx6ull_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_ERR010450
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};
static const struct esdhc_soc_data usdhc_imx7d_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400
| ESDHC_FLAG_STATE_LOST_IN_LPMODE
| ESDHC_FLAG_BROKEN_AUTO_CMD23,
};
+static struct esdhc_soc_data usdhc_s32g2_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
+ | ESDHC_FLAG_SKIP_ERR004536 | ESDHC_FLAG_SKIP_CD_WAKE,
+ .quirks = SDHCI_QUIRK_NO_LED,
+};
+
static struct esdhc_soc_data usdhc_imx7ulp_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_PMQOS | ESDHC_FLAG_HS400
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
+ .quirks = SDHCI_QUIRK_NO_LED,
+};
+static struct esdhc_soc_data usdhc_imxrt1050_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200,
+ .quirks = SDHCI_QUIRK_NO_LED,
};
static struct esdhc_soc_data usdhc_imx8qxp_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
- | ESDHC_FLAG_CQHCI
| ESDHC_FLAG_STATE_LOST_IN_LPMODE
| ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME,
+ .quirks = SDHCI_QUIRK_NO_LED,
};
static struct esdhc_soc_data usdhc_imx8mm_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
- | ESDHC_FLAG_CQHCI
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
+ .quirks = SDHCI_QUIRK_NO_LED,
+};
+
+static struct esdhc_soc_data usdhc_imx95_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
+ | ESDHC_FLAG_STATE_LOST_IN_LPMODE
+ | ESDHC_FLAG_DUMMY_PAD,
+ .quirks = SDHCI_QUIRK_NO_LED,
};
struct pltfm_imx_data {
@@ -287,6 +371,16 @@ struct pltfm_imx_data {
struct clk *clk_ahb;
struct clk *clk_per;
unsigned int actual_clock;
+
+ /*
+ * USDHC has one limitation: SDIO devices need a different
+ * register setting. The driver has to recognize the card type
+ * during card init, but at that stage mmc_host->card is not
+ * yet available, so this field saves the card type reported
+ * through usdhc_init_card().
+ */
+ unsigned int init_card_type;
+
enum {
NO_CMD_PENDING, /* no multiblock command pending */
MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
@@ -310,6 +404,10 @@ static const struct of_device_id imx_esdhc_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-usdhc", .data = &usdhc_imx7ulp_data, },
{ .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, },
{ .compatible = "fsl,imx8mm-usdhc", .data = &usdhc_imx8mm_data, },
+ { .compatible = "fsl,imx94-usdhc", .data = &usdhc_imx95_data, },
+ { .compatible = "fsl,imx95-usdhc", .data = &usdhc_imx95_data, },
+ { .compatible = "fsl,imxrt1050-usdhc", .data = &usdhc_imxrt1050_data, },
+ { .compatible = "nxp,s32g2-usdhc", .data = &usdhc_s32g2_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
@@ -376,6 +474,51 @@ static inline void esdhc_wait_for_card_clock_gate_off(struct sdhci_host *host)
dev_warn(mmc_dev(host->mmc), "%s: card clock still not gate off in 100us!.\n", __func__);
}
+/* Enable the auto tuning circuit to check the CMD line and BUS line */
+static inline void usdhc_auto_tuning_mode_sel_and_en(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+ u32 buswidth, auto_tune_buswidth;
+ u32 reg;
+
+ buswidth = USDHC_GET_BUSWIDTH(readl(host->ioaddr + SDHCI_HOST_CONTROL));
+
+ switch (buswidth) {
+ case ESDHC_CTRL_8BITBUS:
+ auto_tune_buswidth = ESDHC_VEND_SPEC2_AUTO_TUNE_8BIT_EN;
+ break;
+ case ESDHC_CTRL_4BITBUS:
+ auto_tune_buswidth = ESDHC_VEND_SPEC2_AUTO_TUNE_4BIT_EN;
+ break;
+ default: /* 1BITBUS */
+ auto_tune_buswidth = ESDHC_VEND_SPEC2_AUTO_TUNE_1BIT_EN;
+ break;
+ }
+
+ /*
+ * For USDHC, the auto tuning circuit cannot handle asynchronous SDIO
+ * device interrupts correctly. When an SDIO device uses 4 data lines,
+ * the async SDIO interrupt is signalled on the shared DAT[1]; if the
+ * auto tuning circuit is told to check all 4 data lines, including
+ * DAT[1], it detects that interrupt, treats it as data on DAT[1],
+ * and adjusts the delay cells wrongly.
+ * This is a hardware design limitation. To avoid it, configure the
+ * auto tuning circuit to check only the DAT[0] and CMD lines for
+ * SDIO devices.
+ */
+ if (imx_data->init_card_type == MMC_TYPE_SDIO)
+ auto_tune_buswidth = ESDHC_VEND_SPEC2_AUTO_TUNE_1BIT_EN;
+
+ esdhc_clrset_le(host, ESDHC_VEND_SPEC2_AUTO_TUNE_MODE_MASK,
+ auto_tune_buswidth | ESDHC_VEND_SPEC2_AUTO_TUNE_CMD_EN,
+ ESDHC_VEND_SPEC2);
+
+ reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ reg |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
+ writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
+}
+
static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -597,37 +740,19 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
else
new_val &= ~ESDHC_VENDOR_SPEC_VSELECT;
writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
- new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
- if (val & SDHCI_CTRL_TUNED_CLK) {
- new_val |= ESDHC_MIX_CTRL_SMPCLK_SEL;
- new_val |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
- } else {
- new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
- new_val &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN;
- }
- writel(new_val , host->ioaddr + ESDHC_MIX_CTRL);
- } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
+ if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
u32 v = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
- u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
- if (val & SDHCI_CTRL_TUNED_CLK) {
+ if (val & SDHCI_CTRL_TUNED_CLK)
v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
- } else {
+ else
v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
- m &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
- m &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN;
- }
- if (val & SDHCI_CTRL_EXEC_TUNING) {
+ if (val & SDHCI_CTRL_EXEC_TUNING)
v |= ESDHC_MIX_CTRL_EXE_TUNE;
- m |= ESDHC_MIX_CTRL_FBCLK_SEL;
- m |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
- } else {
+ else
v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
- }
writel(v, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
- writel(m, host->ioaddr + ESDHC_MIX_CTRL);
}
return;
case SDHCI_TRANSFER_MODE:
@@ -764,6 +889,11 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
esdhc_clrset_le(host, mask, new_val, reg);
return;
+ case SDHCI_TIMEOUT_CONTROL:
+ esdhc_clrset_le(host, ESDHC_SYS_CTRL_DTOCV_MASK,
+ FIELD_PREP(ESDHC_SYS_CTRL_DTOCV_MASK, val),
+ ESDHC_SYSTEM_CONTROL);
+ return;
case SDHCI_SOFTWARE_RESET:
if (val & SDHCI_RESET_DATA)
new_val = readl(host->ioaddr + SDHCI_HOST_CONTROL);
@@ -865,7 +995,8 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
| ESDHC_CLOCK_MASK);
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
- if (imx_data->socdata->flags & ESDHC_FLAG_ERR010450) {
+ if ((imx_data->socdata->flags & ESDHC_FLAG_ERR010450) &&
+ (!(host->quirks2 & SDHCI_QUIRK2_NO_1_8_V))) {
unsigned int max_clock;
max_clock = imx_data->is_ddr ? 45000000 : 150000000;
@@ -946,9 +1077,73 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
SDHCI_HOST_CONTROL);
}
+static void esdhc_reset_tuning(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+ u32 ctrl, tuning_ctrl, sys_ctrl;
+ int ret;
+
+ /* Reset the tuning circuit */
+ if (esdhc_is_usdhc(imx_data)) {
+ ctrl = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ ctrl &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN;
+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
+ ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+ writel(ctrl, host->ioaddr + ESDHC_MIX_CTRL);
+ writel(0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+ } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
+ writel(ctrl, host->ioaddr + ESDHC_MIX_CTRL);
+ /*
+ * enable the std tuning just in case it was cleared in
+ * sdhc_esdhc_tuning_restore.
+ */
+ tuning_ctrl = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+ if (!(tuning_ctrl & ESDHC_STD_TUNING_EN)) {
+ tuning_ctrl |= ESDHC_STD_TUNING_EN;
+ writel(tuning_ctrl, host->ioaddr + ESDHC_TUNING_CTRL);
+ }
+
+ /* set the reset tuning bit */
+ sys_ctrl = readl(host->ioaddr + ESDHC_SYSTEM_CONTROL);
+ sys_ctrl |= ESDHC_SYS_CTRL_RESET_TUNING;
+ writel(sys_ctrl, host->ioaddr + ESDHC_SYSTEM_CONTROL);
+
+ ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
+ ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+ ctrl &= ~ESDHC_MIX_CTRL_EXE_TUNE;
+ writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
+ /* Make sure ESDHC_MIX_CTRL_EXE_TUNE cleared */
+ ret = readl_poll_timeout(host->ioaddr + SDHCI_AUTO_CMD_STATUS,
+ ctrl, !(ctrl & ESDHC_MIX_CTRL_EXE_TUNE), 1, 50);
+ if (ret == -ETIMEDOUT)
+ dev_warn(mmc_dev(host->mmc),
+ "Warning! clear execute tuning bit failed\n");
+ /*
+ * SDHCI_INT_DATA_AVAIL is a W1C bit; setting it clears the uSDHC
+ * IP's internal logic flag execute_tuning_with_clr_buf, which
+ * finally makes sure the normal data transfer logic is correct.
+ */
+ ctrl = readl(host->ioaddr + SDHCI_INT_STATUS);
+ ctrl |= SDHCI_INT_DATA_AVAIL;
+ writel(ctrl, host->ioaddr + SDHCI_INT_STATUS);
+ }
+ }
+}
+
+static void usdhc_init_card(struct mmc_host *mmc, struct mmc_card *card)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+
+ imx_data->init_card_type = card->type;
+}
+
static int usdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
+ int err;
/*
* i.MX uSDHC internally already uses a fixed optimized timing for
@@ -957,12 +1152,23 @@ static int usdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (host->timing == MMC_TIMING_UHS_DDR50)
return 0;
- return sdhci_execute_tuning(mmc, opcode);
+ /*
+ * Reset the tuning circuit logic. Otherwise the previous tuning
+ * result will impact the current tuning and keep it from setting
+ * the correct delay cell.
+ */
+ esdhc_reset_tuning(host);
+ err = sdhci_execute_tuning(mmc, opcode);
+ /* If tuning done, enable auto tuning */
+ if (!err && !host->tuning_err)
+ usdhc_auto_tuning_mode_sel_and_en(host);
+
+ return err;
}
static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
{
- u32 reg;
+ u32 reg, sys_ctrl;
u8 sw_rst;
int ret;
@@ -978,13 +1184,23 @@ static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
"warning! RESET_ALL never complete before sending tuning command\n");
reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
- reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
- ESDHC_MIX_CTRL_FBCLK_SEL;
+ reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL;
writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
- writel(val << 8, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+ writel(FIELD_PREP(ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_PRE_MASK, val),
+ host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
dev_dbg(mmc_dev(host->mmc),
"tuning with delay 0x%x ESDHC_TUNE_CTRL_STATUS 0x%x\n",
val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS));
+
+ /* set RST_FIFO to reset the async FIFO, and wait for it to self-clear */
+ sys_ctrl = readl(host->ioaddr + ESDHC_SYSTEM_CONTROL);
+ sys_ctrl |= ESDHC_SYS_CTRL_RST_FIFO;
+ writel(sys_ctrl, host->ioaddr + ESDHC_SYSTEM_CONTROL);
+ ret = readl_poll_timeout(host->ioaddr + ESDHC_SYSTEM_CONTROL, sys_ctrl,
+ !(sys_ctrl & ESDHC_SYS_CTRL_RST_FIFO), 10, 100);
+ if (ret == -ETIMEDOUT)
+ dev_warn(mmc_dev(host->mmc),
+ "warning! RST_FIFO not clear in 100us\n");
}
static void esdhc_post_tuning(struct sdhci_host *host)
@@ -993,37 +1209,81 @@ static void esdhc_post_tuning(struct sdhci_host *host)
reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
reg &= ~ESDHC_MIX_CTRL_EXE_TUNE;
- reg |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
}
+/*
+ * find the largest pass window, and use the average delay of this
+ * largest window to get the best timing.
+ */
static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
{
int min, max, avg, ret;
+ int win_length, target_min, target_max, target_win_length;
+ u32 clk_tune_ctrl_status, temp;
- /* find the mininum delay first which can pass tuning */
- min = ESDHC_TUNE_CTRL_MIN;
- while (min < ESDHC_TUNE_CTRL_MAX) {
- esdhc_prepare_tuning(host, min);
- if (!mmc_send_tuning(host->mmc, opcode, NULL))
- break;
- min += ESDHC_TUNE_CTRL_STEP;
- }
-
- /* find the maxinum delay which can not pass tuning */
- max = min + ESDHC_TUNE_CTRL_STEP;
+ min = target_min = ESDHC_TUNE_CTRL_MIN;
+ max = target_max = ESDHC_TUNE_CTRL_MIN;
+ target_win_length = 0;
while (max < ESDHC_TUNE_CTRL_MAX) {
- esdhc_prepare_tuning(host, max);
- if (mmc_send_tuning(host->mmc, opcode, NULL)) {
- max -= ESDHC_TUNE_CTRL_STEP;
- break;
+ /* find the minimum delay first which can pass tuning */
+ while (min < ESDHC_TUNE_CTRL_MAX) {
+ esdhc_prepare_tuning(host, min);
+ if (!mmc_send_tuning(host->mmc, opcode, NULL))
+ break;
+ min += ESDHC_TUNE_CTRL_STEP;
}
- max += ESDHC_TUNE_CTRL_STEP;
+
+ /* find the maximum delay which can not pass tuning */
+ max = min + ESDHC_TUNE_CTRL_STEP;
+ while (max < ESDHC_TUNE_CTRL_MAX) {
+ esdhc_prepare_tuning(host, max);
+ if (mmc_send_tuning(host->mmc, opcode, NULL)) {
+ max -= ESDHC_TUNE_CTRL_STEP;
+ break;
+ }
+ max += ESDHC_TUNE_CTRL_STEP;
+ }
+
+ win_length = max - min + 1;
+ /* get the largest pass window */
+ if (win_length > target_win_length) {
+ target_win_length = win_length;
+ target_min = min;
+ target_max = max;
+ }
+
+ /* continue to find the next pass window */
+ min = max + ESDHC_TUNE_CTRL_STEP;
}
/* use average delay to get the best timing */
- avg = (min + max) / 2;
+ avg = (target_min + target_max) / 2;
esdhc_prepare_tuning(host, avg);
+
+ /*
+ * Adjust the delay according to the tuning window, in preparation
+ * for the auto-tuning logic. As suggested by the hardware team,
+ * configure the auto-tuning window width to 3, so that the auto
+ * tuning logic has enough space to handle the sample point shift
+ * caused by temperature change.
+ */
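+ /*
+ * Illustrative example (added commentary, values are hypothetical):
+ * if the scan above settles on avg = 0x30 with
+ * ESDHC_AUTO_TUNING_WINDOW = 3, the write below programs
+ * DLY_CELL_SET_PRE = 0x2d, DLY_CELL_SET_OUT = 0x3 and
+ * DLY_CELL_SET_POST = 0x3 into ESDHC_TUNE_CTRL_STATUS.
+ */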
+ clk_tune_ctrl_status = FIELD_PREP(ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_PRE_MASK,
+ avg - ESDHC_AUTO_TUNING_WINDOW) |
+ FIELD_PREP(ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_OUT_MASK,
+ ESDHC_AUTO_TUNING_WINDOW) |
+ FIELD_PREP(ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_POST_MASK,
+ ESDHC_AUTO_TUNING_WINDOW);
+
+ writel(clk_tune_ctrl_status, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+ ret = readl_poll_timeout(host->ioaddr + ESDHC_TUNE_CTRL_STATUS, temp,
+ clk_tune_ctrl_status ==
+ FIELD_GET(ESDHC_TUNE_CTRL_STATUS_TAP_SEL_MASK, temp),
+ 1, 10);
+ if (ret == -ETIMEDOUT)
+ dev_warn(mmc_dev(host->mmc),
+ "clock tuning control status not set in 10us\n");
+
ret = mmc_send_tuning(host->mmc, opcode, NULL);
esdhc_post_tuning(host);
@@ -1128,28 +1388,6 @@ static void esdhc_set_strobe_dll(struct sdhci_host *host)
"warning! HS400 strobe DLL status REF/SLV not lock in 50us, STROBE DLL status is %x!\n", v);
}
-static void esdhc_reset_tuning(struct sdhci_host *host)
-{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
- u32 ctrl;
-
- /* Reset the tuning circuit */
- if (esdhc_is_usdhc(imx_data)) {
- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
- ctrl = readl(host->ioaddr + ESDHC_MIX_CTRL);
- ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
- ctrl &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
- writel(ctrl, host->ioaddr + ESDHC_MIX_CTRL);
- writel(0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
- } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
- ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
- ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
- writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
- }
- }
-}
-
static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
u32 m;
@@ -1200,12 +1438,22 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
break;
}
+ if (!(imx_data->socdata->flags & ESDHC_FLAG_DUMMY_PAD) &&
+ (timing == MMC_TIMING_UHS_SDR104 ||
+ timing == MMC_TIMING_MMC_HS200 ||
+ timing == MMC_TIMING_MMC_HS400))
+ m |= ESDHC_MIX_CTRL_FBCLK_SEL;
+ else
+ m &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
+
+ writel(m, host->ioaddr + ESDHC_MIX_CTRL);
+
esdhc_change_pinstate(host, timing);
}
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
- sdhci_reset(host, mask);
+ sdhci_and_cqhci_reset(host, mask);
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
@@ -1220,17 +1468,6 @@ static unsigned int esdhc_get_max_timeout_count(struct sdhci_host *host)
return esdhc_is_usdhc(imx_data) ? 1 << 29 : 1 << 27;
}
-static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
-{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
-
- /* use maximum timeout counter */
- esdhc_clrset_le(host, ESDHC_SYS_CTRL_DTOCV_MASK,
- esdhc_is_usdhc(imx_data) ? 0xF : 0xE,
- SDHCI_TIMEOUT_CONTROL);
-}
-
static u32 esdhc_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
int cmd_error = 0;
@@ -1244,6 +1481,17 @@ static u32 esdhc_cqhci_irq(struct sdhci_host *host, u32 intmask)
return 0;
}
+static void esdhc_hw_reset(struct sdhci_host *host)
+{
+ esdhc_clrset_le(host, ESDHC_SYS_CTRL_IPP_RST_N, 0, ESDHC_SYSTEM_CONTROL);
+ /* eMMC spec requires minimum 1us, here delay between 1-10us */
+ usleep_range(1, 10);
+ esdhc_clrset_le(host, ESDHC_SYS_CTRL_IPP_RST_N,
+ ESDHC_SYS_CTRL_IPP_RST_N, ESDHC_SYSTEM_CONTROL);
+ /* eMMC spec requires minimum 200us, here delay between 200-300us */
+ usleep_range(200, 300);
+}
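+/*
+ * Note (added commentary, not part of the original change): esdhc_hw_reset()
+ * only toggles the uSDHC IPP_RST_N output. It is reached through the MMC
+ * core's hardware-reset path, which typically requires the host to advertise
+ * MMC_CAP_HW_RESET (for instance via the "cap-mmc-hw-reset" device tree
+ * property) before mmc_hw_reset() will call down into this hook.
+ */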
+
static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl_le,
.read_w = esdhc_readw_le,
@@ -1256,12 +1504,12 @@ static struct sdhci_ops sdhci_esdhc_ops = {
.get_min_clock = esdhc_pltfm_get_min_clock,
.get_max_timeout_count = esdhc_get_max_timeout_count,
.get_ro = esdhc_pltfm_get_ro,
- .set_timeout = esdhc_set_timeout,
.set_bus_width = esdhc_pltfm_set_bus_width,
.set_uhs_signaling = esdhc_set_uhs_signaling,
.reset = esdhc_reset,
.irq = esdhc_cqhci_irq,
.dump_vendor_regs = esdhc_dump_debug_regs,
+ .hw_reset = esdhc_hw_reset,
};
static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
@@ -1277,7 +1525,7 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
struct cqhci_host *cq_host = host->mmc->cqe_private;
- int tmp;
+ u32 tmp;
if (esdhc_is_usdhc(imx_data)) {
/*
@@ -1305,8 +1553,10 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
* erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
* TO1.1, it's harmless for MX6SL
*/
- writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
- host->ioaddr + 0x6c);
+ if (!(imx_data->socdata->flags & ESDHC_FLAG_SKIP_ERR004536)) {
+ writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
+ host->ioaddr + 0x6c);
+ }
/* disable DLL_CTRL delay line settings */
writel(0x0, host->ioaddr + ESDHC_DLL_CTRL);
@@ -1330,19 +1580,36 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
- tmp |= ESDHC_STD_TUNING_EN |
- ESDHC_TUNING_START_TAP_DEFAULT;
- if (imx_data->boarddata.tuning_start_tap) {
- tmp &= ~ESDHC_TUNING_START_TAP_MASK;
+ tmp |= ESDHC_STD_TUNING_EN;
+
+ /*
+ * ROM code or bootloader may configure the start tap
+ * and step, so clear them first.
+ */
+ tmp &= ~(ESDHC_TUNING_START_TAP_MASK | ESDHC_TUNING_STEP_MASK);
+ if (imx_data->boarddata.tuning_start_tap)
tmp |= imx_data->boarddata.tuning_start_tap;
- }
+ else
+ tmp |= ESDHC_TUNING_START_TAP_DEFAULT;
if (imx_data->boarddata.tuning_step) {
- tmp &= ~ESDHC_TUNING_STEP_MASK;
tmp |= imx_data->boarddata.tuning_step
<< ESDHC_TUNING_STEP_SHIFT;
+ } else {
+ tmp |= ESDHC_TUNING_STEP_DEFAULT
+ << ESDHC_TUNING_STEP_SHIFT;
}
+ /*
+ * Configure the tuning window to the hardware-suggested value 3.
+ * This tuning window is used by the auto-tuning logic. The default
+ * tuning window is 2; changing it to 3 makes the window a bit
+ * wider and gives auto tuning enough space to handle the sample
+ * point shift caused by temperature change.
+ */
+ tmp &= ~ESDHC_TUNING_WINDOW_MASK;
+ tmp |= FIELD_PREP(ESDHC_TUNING_WINDOW_MASK, ESDHC_AUTO_TUNING_WINDOW);
+
/* Disable the CMD CRC check for tuning, if not, need to
* add some delay after every tuning command, because
* hardware standard tuning logic will directly go to next
@@ -1357,7 +1624,7 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
} else if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
/*
- * ESDHC_STD_TUNING_EN may be configed in bootloader
+ * ESDHC_STD_TUNING_EN may be configured in bootloader
* or ROM code, so clear this bit here to make sure
* the manual tuning can work.
*/
@@ -1368,7 +1635,7 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
/*
* On i.MX8MM, we are running Dual Linux OS, with 1st Linux using SD Card
- * as rootfs storage, 2nd Linux using eMMC as rootfs storage. We let the
+ * as rootfs storage, 2nd Linux using eMMC as rootfs storage. We let
* the 1st linux configure power/clock for the 2nd Linux.
*
* When the 2nd Linux is booting into rootfs stage, we let the 1st Linux
@@ -1383,6 +1650,63 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
}
}
+static void sdhc_esdhc_tuning_save(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ /*
+ * SD/eMMC do not need this tuning save because they are re-initialized
+ * after system resume.
+ * Save the tuning delay value for an SDIO device here, since it may
+ * keep power during system PM. For the usdhc, only SDR50 and SDR104
+ * modes on an SDIO device need tuning, and therefore need the
+ * save/restore.
+ */
+ if (host->timing == MMC_TIMING_UHS_SDR50 ||
+ host->timing == MMC_TIMING_UHS_SDR104) {
+ reg = readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+ reg = FIELD_GET(ESDHC_TUNE_CTRL_STATUS_TAP_SEL_PRE_MASK, reg);
+ imx_data->boarddata.saved_tuning_delay_cell = reg;
+ }
+}
+
+static void sdhc_esdhc_tuning_restore(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ if (host->timing == MMC_TIMING_UHS_SDR50 ||
+ host->timing == MMC_TIMING_UHS_SDR104) {
+ /*
+ * Restoring the tuning delay value is effectively a
+ * manual tuning method, so clear the standard
+ * tuning enable bit here. ESDHC_STD_TUNING_EN is set
+ * back in esdhc_reset_tuning() when re-tuning is
+ * triggered.
+ */
+ reg = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+ reg &= ~ESDHC_STD_TUNING_EN;
+ writel(reg, host->ioaddr + ESDHC_TUNING_CTRL);
+
+ reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ reg |= ESDHC_MIX_CTRL_SMPCLK_SEL;
+ if (!(imx_data->socdata->flags & ESDHC_FLAG_DUMMY_PAD))
+ reg |= ESDHC_MIX_CTRL_FBCLK_SEL;
+ writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
+
+ writel(FIELD_PREP(ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_PRE_MASK,
+ imx_data->boarddata.saved_tuning_delay_cell) |
+ FIELD_PREP(ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_OUT_MASK,
+ ESDHC_AUTO_TUNING_WINDOW) |
+ FIELD_PREP(ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_POST_MASK,
+ ESDHC_AUTO_TUNING_WINDOW),
+ host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+ }
+}
+
static void esdhc_cqe_enable(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -1426,7 +1750,7 @@ static void esdhc_cqe_enable(struct mmc_host *mmc)
* system resume back.
*/
cqhci_writel(cq_host, 0, CQHCI_CTL);
- if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT)
+ if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
dev_err(mmc_dev(host->mmc),
"failed to exit halt state when enable CQE\n");
@@ -1454,15 +1778,15 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
struct esdhc_platform_data *boarddata = &imx_data->boarddata;
int ret;
- if (of_get_property(np, "fsl,wp-controller", NULL))
+ if (of_property_read_bool(np, "fsl,wp-controller"))
boarddata->wp_type = ESDHC_WP_CONTROLLER;
/*
* If we have this property, then activate WP check.
- * Retrieveing and requesting the actual WP GPIO will happen
+ * Retrieving and requesting the actual WP GPIO will happen
* in the call to mmc_of_parse().
*/
- if (of_property_read_bool(np, "wp-gpios"))
+ if (of_property_present(np, "wp-gpios"))
boarddata->wp_type = ESDHC_WP_GPIO;
of_property_read_u32(np, "fsl,tuning-step", &boarddata->tuning_step);
@@ -1471,7 +1795,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
of_property_read_u32(np, "fsl,strobe-dll-delay-target",
&boarddata->strobe_dll_delay_target);
- if (of_find_property(np, "no-1-8-v", NULL))
+ if (of_property_read_bool(np, "no-1-8-v"))
host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
if (of_property_read_u32(np, "fsl,delay-line", &boarddata->delay_line))
@@ -1491,6 +1815,10 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
if (ret)
return ret;
+ /* HS400/HS400ES require 8 bit bus */
+ if (!(host->mmc->caps & MMC_CAP_8_BIT_DATA))
+ host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
+
if (mmc_gpio_get_cd(host->mmc) >= 0)
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
@@ -1516,6 +1844,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
imx_data->socdata = device_get_match_data(&pdev->dev);
+ host->quirks |= imx_data->socdata->quirks;
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);
@@ -1538,7 +1867,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
}
pltfm_host->clk = imx_data->clk_per;
- pltfm_host->clock = clk_get_rate(pltfm_host->clk);
err = clk_prepare_enable(imx_data->clk_per);
if (err)
goto free_sdhci;
@@ -1549,6 +1877,13 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (err)
goto disable_ipg_clk;
+ pltfm_host->clock = clk_get_rate(pltfm_host->clk);
+ if (!pltfm_host->clock) {
+ dev_err(mmc_dev(host->mmc), "could not get clk rate\n");
+ err = -EINVAL;
+ goto disable_ahb_clk;
+ }
+
imx_data->pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR(imx_data->pinctrl))
dev_warn(mmc_dev(host->mmc), "could not get pinctrl\n");
@@ -1558,7 +1893,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
/* GPIO CD can be set as a wakeup source */
- host->mmc->caps |= MMC_CAP_CD_WAKE;
+ if (!(imx_data->socdata->flags & ESDHC_FLAG_SKIP_CD_WAKE))
+ host->mmc->caps |= MMC_CAP_CD_WAKE;
if (!(imx_data->socdata->flags & ESDHC_FLAG_HS200))
host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
@@ -1573,6 +1909,14 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
* to replace the standard one in sdhci_ops.
*/
host->mmc_host_ops.execute_tuning = usdhc_execute_tuning;
+
+ /*
+ * Link the usdhc-specific mmc_host_ops init_card function
+ * so the driver can distinguish the card type.
+ */
+ host->mmc_host_ops.init_card = usdhc_init_card;
+
+ host->max_timeout_count = 0xF;
}
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
@@ -1645,11 +1989,10 @@ disable_per_clk:
free_sdhci:
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
- sdhci_pltfm_free(pdev);
return err;
}
-static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
+static void sdhci_esdhc_imx_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1669,13 +2012,8 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
-
- sdhci_pltfm_free(pdev);
-
- return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int sdhci_esdhc_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -1683,11 +2021,14 @@ static int sdhci_esdhc_suspend(struct device *dev)
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int ret;
- if (host->mmc->caps2 & MMC_CAP2_CQE) {
- ret = cqhci_suspend(host->mmc);
- if (ret)
- return ret;
- }
+ /*
+ * Runtime-resume the device for two reasons:
+ * 1. registers are accessed below (e.g., the wakeup control register),
+ * so the ipg clock must be ungated.
+ * 2. it makes sure pm_runtime_force_resume() in sdhci_esdhc_resume()
+ * really invokes the ->runtime_resume callback (needs_force_resume = 1).
+ */
+ pm_runtime_get_sync(dev);
if ((imx_data->socdata->flags & ESDHC_FLAG_STATE_LOST_IN_LPMODE) &&
(host->tuning_mode != SDHCI_TUNING_MODE_1)) {
@@ -1695,49 +2036,80 @@ static int sdhci_esdhc_suspend(struct device *dev)
mmc_retune_needed(host->mmc);
}
- if (host->tuning_mode != SDHCI_TUNING_MODE_3)
- mmc_retune_needed(host->mmc);
-
- ret = sdhci_suspend_host(host);
- if (ret)
- return ret;
-
- ret = pinctrl_pm_select_sleep_state(dev);
- if (ret)
- return ret;
+ /*
+ * For a device that needs to keep power during system PM, save
+ * the tuning delay value just in case the usdhc loses power
+ * during system PM.
+ */
+ if (mmc_card_keep_power(host->mmc) && mmc_card_wake_sdio_irq(host->mmc) &&
+ esdhc_is_usdhc(imx_data))
+ sdhc_esdhc_tuning_save(host);
+
+ if (device_may_wakeup(dev)) {
+ /* The IRQs on i.MX are not shared; it is safe to disable them */
+ disable_irq(host->irq);
+ ret = sdhci_enable_irq_wakeups(host);
+ if (!ret)
+ dev_warn(dev, "Failed to enable irq wakeup\n");
+ } else {
+ /*
+ * For a device that works as a wakeup source, there is no need
+ * to change the pinctrl to the sleep state.
+ * e.g. for an SDIO device the interrupt is shared with a data pin,
+ * but the pinctrl sleep state may configure the data pin to
+ * another function such as GPIO to save power in PM, which
+ * would finally block the SDIO wakeup function.
+ */
+ ret = pinctrl_pm_select_sleep_state(dev);
+ if (ret)
+ return ret;
+ }
ret = mmc_gpio_set_cd_wake(host->mmc, true);
+ /*
+ * Make sure runtime_suspend is invoked to gate off the clocks.
+ * The uSDHC IP supports in-band SDIO wakeup even without a clock.
+ */
+ pm_runtime_force_suspend(dev);
+
return ret;
}
static int sdhci_esdhc_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int ret;
- ret = pinctrl_pm_select_default_state(dev);
+ pm_runtime_force_resume(dev);
+
+ ret = mmc_gpio_set_cd_wake(host->mmc, false);
if (ret)
return ret;
/* re-initialize hw state in case it's lost in low power mode */
sdhci_esdhc_imx_hwinit(host);
- ret = sdhci_resume_host(host);
- if (ret)
- return ret;
+ if (host->irq_wake_enabled) {
+ sdhci_disable_irq_wakeups(host);
+ enable_irq(host->irq);
+ }
- if (host->mmc->caps2 & MMC_CAP2_CQE)
- ret = cqhci_resume(host->mmc);
+ /*
+ * Restore the saved tuning delay value for a device that keeps
+ * power during system PM.
+ */
+ if (mmc_card_keep_power(host->mmc) && mmc_card_wake_sdio_irq(host->mmc) &&
+ esdhc_is_usdhc(imx_data))
+ sdhc_esdhc_tuning_restore(host);
- if (!ret)
- ret = mmc_gpio_set_cd_wake(host->mmc, false);
+ pm_runtime_put_autosuspend(dev);
return ret;
}
-#endif
-#ifdef CONFIG_PM
static int sdhci_esdhc_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -1751,9 +2123,7 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
return ret;
}
- ret = sdhci_runtime_suspend_host(host);
- if (ret)
- return ret;
+ sdhci_runtime_suspend_host(host);
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
@@ -1767,7 +2137,7 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
- return ret;
+ return 0;
}
static int sdhci_esdhc_runtime_resume(struct device *dev)
@@ -1797,17 +2167,13 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
esdhc_pltfm_set_clock(host, imx_data->actual_clock);
- err = sdhci_runtime_resume_host(host, 0);
- if (err)
- goto disable_ipg_clk;
+ sdhci_runtime_resume_host(host, 0);
if (host->mmc->caps2 & MMC_CAP2_CQE)
err = cqhci_resume(host->mmc);
return err;
-disable_ipg_clk:
- clk_disable_unprepare(imx_data->clk_ipg);
disable_per_clk:
clk_disable_unprepare(imx_data->clk_per);
disable_ahb_clk:
@@ -1817,12 +2183,10 @@ remove_pm_qos_request:
cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
return err;
}
-#endif
static const struct dev_pm_ops sdhci_esdhc_pmops = {
- SET_SYSTEM_SLEEP_PM_OPS(sdhci_esdhc_suspend, sdhci_esdhc_resume)
- SET_RUNTIME_PM_OPS(sdhci_esdhc_runtime_suspend,
- sdhci_esdhc_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(sdhci_esdhc_suspend, sdhci_esdhc_resume)
+ RUNTIME_PM_OPS(sdhci_esdhc_runtime_suspend, sdhci_esdhc_runtime_resume, NULL)
};
static struct platform_driver sdhci_esdhc_imx_driver = {
@@ -1830,7 +2194,7 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
.name = "sdhci-esdhc-imx",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = imx_esdhc_dt_ids,
- .pm = &sdhci_esdhc_pmops,
+ .pm = pm_ptr(&sdhci_esdhc_pmops),
},
.probe = sdhci_esdhc_imx_probe,
.remove = sdhci_esdhc_imx_remove,
diff --git a/drivers/mmc/host/sdhci-esdhc-mcf.c b/drivers/mmc/host/sdhci-esdhc-mcf.c
index 05926bf5ecf9..375fce5639d7 100644
--- a/drivers/mmc/host/sdhci-esdhc-mcf.c
+++ b/drivers/mmc/host/sdhci-esdhc-mcf.c
@@ -299,9 +299,8 @@ static void esdhc_mcf_pltfm_set_bus_width(struct sdhci_host *host, int width)
static void esdhc_mcf_request_done(struct sdhci_host *host,
struct mmc_request *mrq)
{
- struct scatterlist *sg;
+ struct sg_mapping_iter sgm;
u32 *buffer;
- int i;
if (!mrq->data || !mrq->data->bytes_xfered)
goto exit_done;
@@ -313,10 +312,13 @@ static void esdhc_mcf_request_done(struct sdhci_host *host,
* On mcf5441x there is no hw sdma option/flag to select the dma
* transfer endiannes. A swap after the transfer is needed.
*/
- for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) {
- buffer = (u32 *)sg_virt(sg);
- esdhc_mcf_buffer_swap32(buffer, sg->length);
+ sg_miter_start(&sgm, mrq->data->sg, mrq->data->sg_len,
+ SG_MITER_ATOMIC | SG_MITER_TO_SG | SG_MITER_FROM_SG);
+ while (sg_miter_next(&sgm)) {
+ buffer = sgm.addr;
+ esdhc_mcf_buffer_swap32(buffer, sgm.length);
}
+ sg_miter_stop(&sgm);
exit_done:
mmc_request_done(host->mmc, mrq);
@@ -333,7 +335,7 @@ static void esdhc_mcf_copy_to_bounce_buffer(struct sdhci_host *host,
data->blksz * data->blocks);
}
-static struct sdhci_ops sdhci_esdhc_ops = {
+static const struct sdhci_ops sdhci_esdhc_ops = {
.reset = esdhc_mcf_reset,
.set_clock = esdhc_mcf_pltfm_set_clock,
.get_max_clock = esdhc_mcf_pltfm_get_max_clock,
@@ -424,28 +426,22 @@ static int sdhci_esdhc_mcf_probe(struct platform_device *pdev)
host->flags |= SDHCI_AUTO_CMD12;
mcf_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(mcf_data->clk_ipg)) {
- err = PTR_ERR(mcf_data->clk_ipg);
- goto err_exit;
- }
+ if (IS_ERR(mcf_data->clk_ipg))
+ return PTR_ERR(mcf_data->clk_ipg);
mcf_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
- if (IS_ERR(mcf_data->clk_ahb)) {
- err = PTR_ERR(mcf_data->clk_ahb);
- goto err_exit;
- }
+ if (IS_ERR(mcf_data->clk_ahb))
+ return PTR_ERR(mcf_data->clk_ahb);
mcf_data->clk_per = devm_clk_get(&pdev->dev, "per");
- if (IS_ERR(mcf_data->clk_per)) {
- err = PTR_ERR(mcf_data->clk_per);
- goto err_exit;
- }
+ if (IS_ERR(mcf_data->clk_per))
+ return PTR_ERR(mcf_data->clk_per);
pltfm_host->clk = mcf_data->clk_per;
pltfm_host->clock = clk_get_rate(pltfm_host->clk);
err = clk_prepare_enable(mcf_data->clk_per);
if (err)
- goto err_exit;
+ return err;
err = clk_prepare_enable(mcf_data->clk_ipg);
if (err)
@@ -483,13 +479,10 @@ unprep_ipg:
clk_disable_unprepare(mcf_data->clk_ipg);
unprep_per:
clk_disable_unprepare(mcf_data->clk_per);
-err_exit:
- sdhci_pltfm_free(pdev);
-
return err;
}
-static int sdhci_esdhc_mcf_remove(struct platform_device *pdev)
+static void sdhci_esdhc_mcf_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -500,10 +493,6 @@ static int sdhci_esdhc_mcf_remove(struct platform_device *pdev)
clk_disable_unprepare(mcf_data->clk_ipg);
clk_disable_unprepare(mcf_data->clk_ahb);
clk_disable_unprepare(mcf_data->clk_per);
-
- sdhci_pltfm_free(pdev);
-
- return 0;
}
static struct platform_driver sdhci_esdhc_mcf_driver = {
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index cce390fe9cf3..35ef5c5f5146 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2014 Broadcom Corporation
/*
* iProc SDHCI platform driver
@@ -20,7 +10,7 @@
#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include "sdhci-pltfm.h"
struct sdhci_iproc_data {
@@ -28,6 +18,7 @@ struct sdhci_iproc_data {
u32 caps;
u32 caps1;
u32 mmc_caps;
+ bool missing_caps;
};
struct sdhci_iproc_host {
@@ -173,6 +164,23 @@ static unsigned int sdhci_iproc_get_max_clock(struct sdhci_host *host)
return pltfm_host->clock;
}
+/*
+ * There is a known bug on BCM2711's SDHCI core integration where the
+ * controller will hang when the difference between the core clock and the bus
+ * clock is too great. Specifically this can be reproduced under the following
+ * conditions:
+ *
+ * - No SD card plugged in, polling thread is running, probing cards at
+ * 100 kHz.
+ * - BCM2711's core clock configured at 500MHz or more
+ *
+ * So we set 200kHz as the minimum clock frequency available for that SoC.
+ */
+static unsigned int sdhci_iproc_bcm2711_get_min_clock(struct sdhci_host *host)
+{
+ return 200000;
+}
+
static const struct sdhci_ops sdhci_iproc_ops = {
.set_clock = sdhci_set_clock,
.get_max_clock = sdhci_iproc_get_max_clock,
@@ -244,7 +252,6 @@ static const struct sdhci_iproc_data iproc_data = {
static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
- SDHCI_QUIRK_MISSING_CAPS |
SDHCI_QUIRK_NO_HISPD_BIT,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.ops = &sdhci_iproc_32only_ops,
@@ -259,6 +266,7 @@ static const struct sdhci_iproc_data bcm2835_data = {
.caps1 = SDHCI_DRIVER_TYPE_A |
SDHCI_DRIVER_TYPE_C,
.mmc_caps = 0x00000000,
+ .missing_caps = true,
};
static const struct sdhci_ops sdhci_iproc_bcm2711_ops = {
@@ -271,6 +279,7 @@ static const struct sdhci_ops sdhci_iproc_bcm2711_ops = {
.set_clock = sdhci_set_clock,
.set_power = sdhci_set_power_and_bus_voltage,
.get_max_clock = sdhci_iproc_get_max_clock,
+ .get_min_clock = sdhci_iproc_bcm2711_get_min_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
@@ -287,8 +296,7 @@ static const struct sdhci_iproc_data bcm2711_data = {
};
static const struct sdhci_pltfm_data sdhci_bcm7211a0_pltfm_data = {
- .quirks = SDHCI_QUIRK_MISSING_CAPS |
- SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_BROKEN_DMA |
SDHCI_QUIRK_BROKEN_ADMA,
.ops = &sdhci_iproc_ops,
@@ -307,6 +315,7 @@ static const struct sdhci_iproc_data bcm7211a0_data = {
SDHCI_CAN_DO_HISPD,
.caps1 = SDHCI_DRIVER_TYPE_C |
SDHCI_DRIVER_TYPE_D,
+ .missing_caps = true,
};
static const struct of_device_id sdhci_iproc_of_match[] = {
@@ -370,42 +379,25 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
ret = mmc_of_parse(host->mmc);
if (ret)
- goto err;
+ return ret;
sdhci_get_property(pdev);
host->mmc->caps |= iproc_host->data->mmc_caps;
if (dev->of_node) {
- pltfm_host->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(pltfm_host->clk)) {
- ret = PTR_ERR(pltfm_host->clk);
- goto err;
- }
- ret = clk_prepare_enable(pltfm_host->clk);
- if (ret) {
- dev_err(dev, "failed to enable host clk\n");
- goto err;
- }
+ pltfm_host->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(pltfm_host->clk))
+ return PTR_ERR(pltfm_host->clk);
}
- if (iproc_host->data->pdata->quirks & SDHCI_QUIRK_MISSING_CAPS) {
- host->caps = iproc_host->data->caps;
- host->caps1 = iproc_host->data->caps1;
+ if (iproc_host->data->missing_caps) {
+ __sdhci_read_caps(host, NULL,
+ &iproc_host->data->caps,
+ &iproc_host->data->caps1);
}
- ret = sdhci_add_host(host);
- if (ret)
- goto err_clk;
-
- return 0;
-
-err_clk:
- if (dev->of_node)
- clk_disable_unprepare(pltfm_host->clk);
-err:
- sdhci_pltfm_free(pdev);
- return ret;
+ return sdhci_add_host(host);
}
static void sdhci_iproc_shutdown(struct platform_device *pdev)
@@ -422,7 +414,7 @@ static struct platform_driver sdhci_iproc_driver = {
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_iproc_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove = sdhci_pltfm_remove,
.shutdown = sdhci_iproc_shutdown,
};
module_platform_driver(sdhci_iproc_driver);
diff --git a/drivers/mmc/host/sdhci-milbeaut.c b/drivers/mmc/host/sdhci-milbeaut.c
index 148b37ac6564..bda71d5966dc 100644
--- a/drivers/mmc/host/sdhci-milbeaut.c
+++ b/drivers/mmc/host/sdhci-milbeaut.c
@@ -258,7 +258,7 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev)
ret = mmc_of_parse(host->mmc);
if (ret)
- goto err;
+ return ret;
platform_set_drvdata(pdev, host);
@@ -267,23 +267,19 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev)
host->irq = irq;
host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(host->ioaddr)) {
- ret = PTR_ERR(host->ioaddr);
- goto err;
- }
+ if (IS_ERR(host->ioaddr))
+ return PTR_ERR(host->ioaddr);
if (dev_of_node(dev)) {
sdhci_get_of_property(pdev);
priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(priv->clk_iface)) {
- ret = PTR_ERR(priv->clk_iface);
- goto err;
- }
+ if (IS_ERR(priv->clk_iface))
+ return PTR_ERR(priv->clk_iface);
ret = clk_prepare_enable(priv->clk_iface);
if (ret)
- goto err;
+ return ret;
priv->clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(priv->clk)) {
@@ -308,12 +304,10 @@ err_add_host:
clk_disable_unprepare(priv->clk);
err_clk:
clk_disable_unprepare(priv->clk_iface);
-err:
- sdhci_free_host(host);
return ret;
}
-static int sdhci_milbeaut_remove(struct platform_device *pdev)
+static void sdhci_milbeaut_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct f_sdhost_priv *priv = sdhci_priv(host);
@@ -324,20 +318,17 @@ static int sdhci_milbeaut_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk_iface);
clk_disable_unprepare(priv->clk);
- sdhci_free_host(host);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static struct platform_driver sdhci_milbeaut_driver = {
.driver = {
.name = "sdhci-milbeaut",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .of_match_table = of_match_ptr(mlb_dt_ids),
+ .of_match_table = mlb_dt_ids,
},
.probe = sdhci_milbeaut_probe,
- .remove = sdhci_milbeaut_remove,
+ .remove = sdhci_milbeaut_remove,
};
module_platform_driver(sdhci_milbeaut_driver);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index e44b7a66b73c..3b85233131b3 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -6,18 +6,21 @@
*/
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
-#include <linux/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/interconnect.h>
+#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/reset.h>
+#include <soc/qcom/ice.h>
+
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"
@@ -78,6 +81,7 @@
#define CORE_IO_PAD_PWR_SWITCH_EN BIT(15)
#define CORE_IO_PAD_PWR_SWITCH BIT(16)
#define CORE_HC_SELECT_IN_EN BIT(18)
+#define CORE_HC_SELECT_IN_SDR50 (4 << 19)
#define CORE_HC_SELECT_IN_HS400 (6 << 19)
#define CORE_HC_SELECT_IN_MASK (7 << 19)
@@ -131,9 +135,18 @@
/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000
+/* Max load for eMMC Vdd supply */
+#define MMC_VMMC_MAX_LOAD_UA 570000
+
/* Max load for eMMC Vdd-io supply */
#define MMC_VQMMC_MAX_LOAD_UA 325000
+/* Max load for SD Vdd supply */
+#define SD_VMMC_MAX_LOAD_UA 800000
+
+/* Max load for SD Vdd-io supply */
+#define SD_VQMMC_MAX_LOAD_UA 22000
+
#define msm_host_readl(msm_host, host, offset) \
msm_host->var_ops->msm_readl_relaxed(host, offset)
@@ -256,12 +269,14 @@ struct sdhci_msm_variant_info {
struct sdhci_msm_host {
struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
- void __iomem *ice_mem; /* MSM ICE mapped address (if available) */
int pwr_irq; /* power irq */
struct clk *bus_clk; /* SDHC bus voter clock */
struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll*/
- /* core, iface, cal, sleep, and ice clocks */
- struct clk_bulk_data bulk_clks[5];
+ /* core, iface, cal and sleep clocks */
+ struct clk_bulk_data bulk_clks[4];
+#ifdef CONFIG_MMC_CRYPTO
+ struct qcom_ice *ice;
+#endif
unsigned long clk_rate;
struct mmc_host *mmc;
bool use_14lpp_dll_reset;
@@ -329,41 +344,43 @@ static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
writel_relaxed(val, host->ioaddr + offset);
}
-static unsigned int msm_get_clock_mult_for_bus_mode(struct sdhci_host *host)
+static unsigned int msm_get_clock_mult_for_bus_mode(struct sdhci_host *host,
+ unsigned int clock,
+ unsigned int timing)
{
- struct mmc_ios ios = host->mmc->ios;
/*
* The SDHC requires internal clock frequency to be double the
* actual clock that will be set for DDR mode. The controller
* uses the faster clock(100/400MHz) for some of its parts and
* send the actual required clock (50/200MHz) to the card.
*/
- if (ios.timing == MMC_TIMING_UHS_DDR50 ||
- ios.timing == MMC_TIMING_MMC_DDR52 ||
- ios.timing == MMC_TIMING_MMC_HS400 ||
+ if (timing == MMC_TIMING_UHS_DDR50 ||
+ timing == MMC_TIMING_MMC_DDR52 ||
+ (timing == MMC_TIMING_MMC_HS400 &&
+ clock == MMC_HS200_MAX_DTR) ||
host->flags & SDHCI_HS400_TUNING)
return 2;
return 1;
}
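/*
 * Worked example (added commentary, not part of this patch): for
 * MMC_TIMING_MMC_DDR52 at a 52 MHz card clock the multiplier is 2, so
 * dev_pm_opp_set_rate() below is asked for 104 MHz; HS400 at
 * MMC_HS200_MAX_DTR (200 MHz) likewise doubles to 400 MHz, while plain
 * SDR modes keep a multiplier of 1 and run the controller at the card
 * clock.
 */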
static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
- unsigned int clock)
+ unsigned int clock,
+ unsigned int timing)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
- struct mmc_ios curr_ios = host->mmc->ios;
struct clk *core_clk = msm_host->bulk_clks[0].clk;
unsigned long achieved_rate;
unsigned int desired_rate;
unsigned int mult;
int rc;
- mult = msm_get_clock_mult_for_bus_mode(host);
+ mult = msm_get_clock_mult_for_bus_mode(host, clock, timing);
desired_rate = clock * mult;
rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), desired_rate);
if (rc) {
pr_err("%s: Failed to set clock at rate %u at timing %d\n",
- mmc_hostname(host->mmc), desired_rate, curr_ios.timing);
+ mmc_hostname(host->mmc), desired_rate, timing);
return;
}
@@ -382,7 +399,7 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
msm_host->clk_rate = desired_rate;
pr_debug("%s: Setting clock at rate %lu at timing %d\n",
- mmc_hostname(host->mmc), achieved_rate, curr_ios.timing);
+ mmc_hostname(host->mmc), achieved_rate, timing);
}
/* Platform specific tuning */
@@ -1119,6 +1136,10 @@ static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host)
{
struct mmc_ios *ios = &host->mmc->ios;
+ if (ios->timing == MMC_TIMING_UHS_SDR50 &&
+ host->flags & SDHCI_SDR50_NEEDS_TUNING)
+ return true;
+
/*
* Tuning is required for SDR104, HS200 and HS400 cards and
* if clock frequency is greater than 100MHz in these modes.
@@ -1187,6 +1208,8 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
struct mmc_ios ios = host->mmc->ios;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ const struct sdhci_msm_offset *msm_offset = msm_host->offset;
+ u32 config;
if (!sdhci_msm_is_tuning_needed(host)) {
msm_host->use_cdr = false;
@@ -1203,6 +1226,14 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
*/
msm_host->tuning_done = 0;
+ if (ios.timing == MMC_TIMING_UHS_SDR50 &&
+ host->flags & SDHCI_SDR50_NEEDS_TUNING) {
+ config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
+ config &= ~CORE_HC_SELECT_IN_MASK;
+ config |= CORE_HC_SELECT_IN_EN | CORE_HC_SELECT_IN_SDR50;
+ writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
+ }
+
/*
* For HS400 tuning in HS200 timing requires:
* - select MCLK/2 in VENDOR_SPEC
@@ -1210,7 +1241,7 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
*/
if (host->flags & SDHCI_HS400_TUNING) {
sdhci_msm_hc_select_mode(host);
- msm_set_clock_rate_for_bus_mode(host, ios.clock);
+ msm_set_clock_rate_for_bus_mode(host, ios.clock, ios.timing);
host->flags &= ~SDHCI_HS400_TUNING;
}
@@ -1398,11 +1429,48 @@ static int sdhci_msm_set_pincfg(struct sdhci_msm_host *msm_host, bool level)
return ret;
}
-static int sdhci_msm_set_vmmc(struct mmc_host *mmc)
+static void msm_config_vmmc_regulator(struct mmc_host *mmc, bool hpm)
+{
+ int load;
+
+ if (!hpm)
+ load = 0;
+ else if (!mmc->card)
+ load = max(MMC_VMMC_MAX_LOAD_UA, SD_VMMC_MAX_LOAD_UA);
+ else if (mmc_card_mmc(mmc->card))
+ load = MMC_VMMC_MAX_LOAD_UA;
+ else if (mmc_card_sd(mmc->card))
+ load = SD_VMMC_MAX_LOAD_UA;
+ else
+ return;
+
+ regulator_set_load(mmc->supply.vmmc, load);
+}
+
+static void msm_config_vqmmc_regulator(struct mmc_host *mmc, bool hpm)
+{
+ int load;
+
+ if (!hpm)
+ load = 0;
+ else if (!mmc->card)
+ load = max(MMC_VQMMC_MAX_LOAD_UA, SD_VQMMC_MAX_LOAD_UA);
+ else if (mmc_card_sd(mmc->card))
+ load = SD_VQMMC_MAX_LOAD_UA;
+ else
+ return;
+
+ regulator_set_load(mmc->supply.vqmmc, load);
+}
+
+static int sdhci_msm_set_vmmc(struct sdhci_msm_host *msm_host,
+ struct mmc_host *mmc, bool hpm)
{
if (IS_ERR(mmc->supply.vmmc))
return 0;
+ msm_config_vmmc_regulator(mmc, hpm);
+
return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, mmc->ios.vdd);
}
@@ -1415,6 +1483,8 @@ static int msm_toggle_vqmmc(struct sdhci_msm_host *msm_host,
if (msm_host->vqmmc_enabled == level)
return 0;
+ msm_config_vqmmc_regulator(mmc, level);
+
if (level) {
/* Set the IO voltage regulator to default voltage level */
if (msm_host->caps_0 & CORE_3_0V_SUPPORT)
@@ -1511,6 +1581,7 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct mmc_host *mmc = host->mmc;
bool done = false;
u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
const struct sdhci_msm_offset *msm_offset =
@@ -1568,6 +1639,12 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
"%s: pwr_irq for req: (%d) timed out\n",
mmc_hostname(host->mmc), req_type);
}
+
+ if ((req_type & REQ_BUS_ON) && mmc->card && !mmc->ops->get_cd(mmc)) {
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ host->pwr = 0;
+ }
+
pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
__func__, req_type);
}
@@ -1626,6 +1703,13 @@ static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
udelay(10);
}
+ if ((irq_status & CORE_PWRCTL_BUS_ON) && mmc->card &&
+ !mmc->ops->get_cd(mmc)) {
+ msm_host_writel(msm_host, CORE_PWRCTL_BUS_FAIL, host,
+ msm_offset->core_pwrctl_ctl);
+ return;
+ }
+
/* Handle BUS ON/OFF*/
if (irq_status & CORE_PWRCTL_BUS_ON) {
pwr_state = REQ_BUS_ON;
@@ -1637,7 +1721,8 @@ static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
}
if (pwr_state) {
- ret = sdhci_msm_set_vmmc(mmc);
+ ret = sdhci_msm_set_vmmc(msm_host, mmc,
+ pwr_state & REQ_BUS_ON);
if (!ret)
ret = sdhci_msm_set_vqmmc(msm_host, mmc,
pwr_state & REQ_BUS_ON);
@@ -1781,6 +1866,7 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct mmc_ios ios = host->mmc->ios;
if (!clock) {
host->mmc->actual_clock = msm_host->clk_rate = 0;
@@ -1789,7 +1875,7 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_msm_hc_select_mode(host);
- msm_set_clock_rate_for_bus_mode(host, clock);
+ msm_set_clock_rate_for_bus_mode(host, ios.clock, ios.timing);
out:
__sdhci_msm_set_clock(host, clock);
}
@@ -1802,216 +1888,136 @@ out:
#ifdef CONFIG_MMC_CRYPTO
-#define AES_256_XTS_KEY_SIZE 64
-
-/* QCOM ICE registers */
-
-#define QCOM_ICE_REG_VERSION 0x0008
-
-#define QCOM_ICE_REG_FUSE_SETTING 0x0010
-#define QCOM_ICE_FUSE_SETTING_MASK 0x1
-#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2
-#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4
-
-#define QCOM_ICE_REG_BIST_STATUS 0x0070
-#define QCOM_ICE_BIST_STATUS_MASK 0xF0000000
-
-#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000
-
-#define sdhci_msm_ice_writel(host, val, reg) \
- writel((val), (host)->ice_mem + (reg))
-#define sdhci_msm_ice_readl(host, reg) \
- readl((host)->ice_mem + (reg))
-
-static bool sdhci_msm_ice_supported(struct sdhci_msm_host *msm_host)
-{
- struct device *dev = mmc_dev(msm_host->mmc);
- u32 regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_VERSION);
- int major = regval >> 24;
- int minor = (regval >> 16) & 0xFF;
- int step = regval & 0xFFFF;
-
- /* For now this driver only supports ICE version 3. */
- if (major != 3) {
- dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n",
- major, minor, step);
- return false;
- }
-
- dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n",
- major, minor, step);
-
- /* If fuses are blown, ICE might not work in the standard way. */
- regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_FUSE_SETTING);
- if (regval & (QCOM_ICE_FUSE_SETTING_MASK |
- QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK |
- QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) {
- dev_warn(dev, "Fuses are blown; ICE is unusable!\n");
- return false;
- }
- return true;
-}
-
-static inline struct clk *sdhci_msm_ice_get_clk(struct device *dev)
-{
- return devm_clk_get(dev, "ice");
-}
+static const struct blk_crypto_ll_ops sdhci_msm_crypto_ops; /* forward decl */
static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
struct cqhci_host *cq_host)
{
struct mmc_host *mmc = msm_host->mmc;
+ struct blk_crypto_profile *profile = &mmc->crypto_profile;
struct device *dev = mmc_dev(mmc);
- struct resource *res;
+ struct qcom_ice *ice;
+ union cqhci_crypto_capabilities caps;
+ union cqhci_crypto_cap_entry cap;
+ int err;
+ int i;
if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
return 0;
- res = platform_get_resource_byname(msm_host->pdev, IORESOURCE_MEM,
- "ice");
- if (!res) {
- dev_warn(dev, "ICE registers not found\n");
- goto disable;
+ ice = devm_of_qcom_ice_get(dev);
+ if (ice == ERR_PTR(-EOPNOTSUPP)) {
+ dev_warn(dev, "Disabling inline encryption support\n");
+ ice = NULL;
}
- if (!qcom_scm_ice_available()) {
- dev_warn(dev, "ICE SCM interface not found\n");
- goto disable;
+ if (IS_ERR_OR_NULL(ice))
+ return PTR_ERR_OR_ZERO(ice);
+
+ if (qcom_ice_get_supported_key_type(ice) != BLK_CRYPTO_KEY_TYPE_RAW) {
+ dev_warn(dev, "Wrapped keys not supported. Disabling inline encryption support.\n");
+ return 0;
}
- msm_host->ice_mem = devm_ioremap_resource(dev, res);
- if (IS_ERR(msm_host->ice_mem))
- return PTR_ERR(msm_host->ice_mem);
+ msm_host->ice = ice;
- if (!sdhci_msm_ice_supported(msm_host))
- goto disable;
+ /* Initialize the blk_crypto_profile */
- mmc->caps2 |= MMC_CAP2_CRYPTO;
- return 0;
+ caps.reg_val = cpu_to_le32(cqhci_readl(cq_host, CQHCI_CCAP));
-disable:
- dev_warn(dev, "Disabling inline encryption support\n");
- return 0;
-}
+ /* The number of keyslots supported is (CFGC+1) */
+ err = devm_blk_crypto_profile_init(dev, profile, caps.config_count + 1);
+ if (err)
+ return err;
-static void sdhci_msm_ice_low_power_mode_enable(struct sdhci_msm_host *msm_host)
-{
- u32 regval;
+ profile->ll_ops = sdhci_msm_crypto_ops;
+ profile->max_dun_bytes_supported = 4;
+ profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
+ profile->dev = dev;
- regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_ADVANCED_CONTROL);
/*
- * Enable low power mode sequence
- * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0
+ * Currently this driver only supports AES-256-XTS. All known versions
+ * of ICE support it, but to be safe make sure it is really declared in
+ * the crypto capability registers. The crypto capability registers
+ * also give the supported data unit size(s).
*/
- regval |= 0x7000;
- sdhci_msm_ice_writel(msm_host, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
+ for (i = 0; i < caps.num_crypto_cap; i++) {
+ cap.reg_val = cpu_to_le32(cqhci_readl(cq_host,
+ CQHCI_CRYPTOCAP +
+ i * sizeof(__le32)));
+ if (cap.algorithm_id == CQHCI_CRYPTO_ALG_AES_XTS &&
+ cap.key_size == CQHCI_CRYPTO_KEY_SIZE_256)
+ profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |=
+ cap.sdus_mask * 512;
+ }
+
+ mmc->caps2 |= MMC_CAP2_CRYPTO;
+ return 0;
}
-static void sdhci_msm_ice_optimization_enable(struct sdhci_msm_host *msm_host)
+static void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host)
{
- u32 regval;
-
- /* ICE Optimizations Enable Sequence */
- regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_ADVANCED_CONTROL);
- regval |= 0xD807100;
- /* ICE HPG requires delay before writing */
- udelay(5);
- sdhci_msm_ice_writel(msm_host, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
- udelay(5);
+ if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)
+ qcom_ice_enable(msm_host->ice);
}
-/*
- * Wait until the ICE BIST (built-in self-test) has completed.
- *
- * This may be necessary before ICE can be used.
- *
- * Note that we don't really care whether the BIST passed or failed; we really
- * just want to make sure that it isn't still running. This is because (a) the
- * BIST is a FIPS compliance thing that never fails in practice, (b) ICE is
- * documented to reject crypto requests if the BIST fails, so we needn't do it
- * in software too, and (c) properly testing storage encryption requires testing
- * the full storage stack anyway, and not relying on hardware-level self-tests.
- */
-static int sdhci_msm_ice_wait_bist_status(struct sdhci_msm_host *msm_host)
+static int sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
{
- u32 regval;
- int err;
+ if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)
+ return qcom_ice_resume(msm_host->ice);
- err = readl_poll_timeout(msm_host->ice_mem + QCOM_ICE_REG_BIST_STATUS,
- regval, !(regval & QCOM_ICE_BIST_STATUS_MASK),
- 50, 5000);
- if (err)
- dev_err(mmc_dev(msm_host->mmc),
- "Timed out waiting for ICE self-test to complete\n");
- return err;
+ return 0;
}
-static void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host)
+static int sdhci_msm_ice_suspend(struct sdhci_msm_host *msm_host)
{
- if (!(msm_host->mmc->caps2 & MMC_CAP2_CRYPTO))
- return;
- sdhci_msm_ice_low_power_mode_enable(msm_host);
- sdhci_msm_ice_optimization_enable(msm_host);
- sdhci_msm_ice_wait_bist_status(msm_host);
+ if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)
+ return qcom_ice_suspend(msm_host->ice);
+
+ return 0;
}
-static int __maybe_unused sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
+static inline struct sdhci_msm_host *
+sdhci_msm_host_from_crypto_profile(struct blk_crypto_profile *profile)
{
- if (!(msm_host->mmc->caps2 & MMC_CAP2_CRYPTO))
- return 0;
- return sdhci_msm_ice_wait_bist_status(msm_host);
+ struct mmc_host *mmc = mmc_from_crypto_profile(profile);
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ return msm_host;
}
/*
- * Program a key into a QC ICE keyslot, or evict a keyslot. QC ICE requires
- * vendor-specific SCM calls for this; it doesn't support the standard way.
+ * Program a key into a QC ICE keyslot. QC ICE requires a QC-specific SCM call
+ * for this; it doesn't support the standard way.
*/
-static int sdhci_msm_program_key(struct cqhci_host *cq_host,
- const union cqhci_crypto_cfg_entry *cfg,
- int slot)
+static int sdhci_msm_ice_keyslot_program(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot)
{
- struct device *dev = mmc_dev(cq_host->mmc);
- union cqhci_crypto_cap_entry cap;
- union {
- u8 bytes[AES_256_XTS_KEY_SIZE];
- u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)];
- } key;
- int i;
- int err;
+ struct sdhci_msm_host *msm_host =
+ sdhci_msm_host_from_crypto_profile(profile);
- if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE))
- return qcom_scm_ice_invalidate_key(slot);
+ return qcom_ice_program_key(msm_host->ice, slot, key);
+}
- /* Only AES-256-XTS has been tested so far. */
- cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
- if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS ||
- cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256) {
- dev_err_ratelimited(dev,
- "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n",
- cap.algorithm_id, cap.key_size);
- return -EINVAL;
- }
+static int sdhci_msm_ice_keyslot_evict(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot)
+{
+ struct sdhci_msm_host *msm_host =
+ sdhci_msm_host_from_crypto_profile(profile);
- memcpy(key.bytes, cfg->crypto_key, AES_256_XTS_KEY_SIZE);
+ return qcom_ice_evict_key(msm_host->ice, slot);
+}
- /*
- * The SCM call byte-swaps the 32-bit words of the key. So we have to
- * do the same, in order for the final key be correct.
- */
- for (i = 0; i < ARRAY_SIZE(key.words); i++)
- __cpu_to_be32s(&key.words[i]);
+static const struct blk_crypto_ll_ops sdhci_msm_crypto_ops = {
+ .keyslot_program = sdhci_msm_ice_keyslot_program,
+ .keyslot_evict = sdhci_msm_ice_keyslot_evict,
+};
- err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE,
- QCOM_SCM_ICE_CIPHER_AES_256_XTS,
- cfg->data_unit_size);
- memzero_explicit(&key, sizeof(key));
- return err;
-}
#else /* CONFIG_MMC_CRYPTO */
-static inline struct clk *sdhci_msm_ice_get_clk(struct device *dev)
-{
- return NULL;
-}
static inline int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
struct cqhci_host *cq_host)
@@ -2023,11 +2029,17 @@ static inline void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host)
{
}
-static inline int __maybe_unused
+static inline int
sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
{
return 0;
}
+
+static inline int
+sdhci_msm_ice_suspend(struct sdhci_msm_host *msm_host)
+{
+ return 0;
+}
#endif /* !CONFIG_MMC_CRYPTO */
/*****************************************************************************\
@@ -2089,11 +2101,28 @@ static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
sdhci_cqe_disable(mmc, recovery);
}
+static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ u32 count, start = 15;
+
+ __sdhci_set_timeout(host, cmd);
+ count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL);
+ /*
+ * Update the software timeout value if it is less than the hardware data
+ * timeout value. The Qcom SoC hardware data timeout value is calculated
+ * as 4 * MCLK * 2^(count + 13), where MCLK = 1 / host->clock.
+ */
+ if (cmd && cmd->data && host->clock > 400000 &&
+ host->clock <= 50000000 &&
+ ((1 << (count + start)) > (10 * host->clock)))
+ host->data_timeout = 22LL * NSEC_PER_SEC;
+}
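+/*
+ * Worked example (added commentary, not part of this patch): with
+ * host->clock = 25 MHz and a timeout count of 0xE read back from
+ * SDHCI_TIMEOUT_CONTROL, 1 << (14 + 15) = 536870912 is greater than
+ * 10 * 25000000, i.e. the hardware data timeout of roughly
+ * 4 * 2^(14 + 13) / 25000000 ~= 21.5 s exceeds 10 s, so
+ * host->data_timeout is raised to 22 s and the software timer will
+ * not expire before the controller itself reports a data timeout.
+ */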
+
static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
.enable = sdhci_msm_cqe_enable,
.disable = sdhci_msm_cqe_disable,
#ifdef CONFIG_MMC_CRYPTO
- .program_key = sdhci_msm_program_key,
+ .uses_custom_crypto_profile = true,
#endif
};
@@ -2200,8 +2229,7 @@ static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
if (!msm_host->use_cdr)
break;
if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
- SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
- SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
+ !mmc_op_tuning(SDHCI_GET_CMD(val)))
sdhci_msm_set_cdr(host, true);
else
sdhci_msm_set_cdr(host, false);
@@ -2286,13 +2314,6 @@ static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
}
-static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
-{
- if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL))
- cqhci_deactivate(host->mmc);
- sdhci_reset(host, mask);
-}
-
static int sdhci_msm_register_vreg(struct sdhci_msm_host *msm_host)
{
int ret;
@@ -2417,8 +2438,13 @@ static const struct sdhci_msm_variant_info sdm845_sdhci_var = {
};
static const struct of_device_id sdhci_msm_dt_match[] = {
+ /*
+ * Do not add new variants to the driver which are compatible with
+ * generic ones, unless they need customization.
+ */
{.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
+ {.compatible = "qcom,sdm670-sdhci", .data = &sdm845_sdhci_var},
{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
{.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var},
{},
@@ -2427,7 +2453,7 @@ static const struct of_device_id sdhci_msm_dt_match[] = {
MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
static const struct sdhci_ops sdhci_msm_ops = {
- .reset = sdhci_msm_reset,
+ .reset = sdhci_and_cqhci_reset,
.set_clock = sdhci_msm_set_clock,
.get_min_clock = sdhci_msm_get_min_clock,
.get_max_clock = sdhci_msm_get_max_clock,
@@ -2438,6 +2464,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
.irq = sdhci_msm_cqe_irq,
.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
.set_power = sdhci_set_power_noreg,
+ .set_timeout = sdhci_msm_set_timeout,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
@@ -2462,8 +2489,48 @@ static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
msm_host->ddr_config = DDR_CONFIG_POR_VAL;
of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
+
+ if (of_device_is_compatible(node, "qcom,msm8916-sdhci"))
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA;
}
+static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host)
+{
+ struct reset_control *reset;
+ int ret = 0;
+
+ reset = reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(reset))
+ return dev_err_probe(dev, PTR_ERR(reset),
+ "unable to acquire core_reset\n");
+
+ if (!reset)
+ return ret;
+
+ ret = reset_control_assert(reset);
+ if (ret) {
+ reset_control_put(reset);
+ return dev_err_probe(dev, ret, "core_reset assert failed\n");
+ }
+
+ /*
+ * The hardware requirement for delay between assert/deassert
+ * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
+ * ~125us (4/32768). To be on the safe side add 200us delay.
+ */
+ usleep_range(200, 210);
+
+ ret = reset_control_deassert(reset);
+ if (ret) {
+ reset_control_put(reset);
+ return dev_err_probe(dev, ret, "core_reset deassert failed\n");
+ }
+
+ usleep_range(200, 210);
+ reset_control_put(reset);
+
+ return ret;
+}
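A quick sanity check on the delay chosen in sdhci_msm_gcc_reset() above (plain arithmetic, nothing beyond what the comment already states):

    /* 4 sleep-clock cycles at 32.768 kHz = 4 / 32768 s ~= 122 us, so the
     * usleep_range(200, 210) on either side of the assert/deassert leaves a
     * comfortable margin over the hardware requirement. */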
static int sdhci_msm_probe(struct platform_device *pdev)
{
@@ -2491,7 +2558,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
ret = mmc_of_parse(host->mmc);
if (ret)
- goto pltfm_free;
+ return ret;
/*
* Based on the compatible string, load the required msm host info from
@@ -2511,16 +2578,20 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
+ ret = sdhci_msm_gcc_reset(&pdev->dev, host);
+ if (ret)
+ return ret;
+
/* Setup SDCC bus voter clock. */
msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
if (!IS_ERR(msm_host->bus_clk)) {
/* Vote for max. clk rate for max. performance */
ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
if (ret)
- goto pltfm_free;
+ return ret;
ret = clk_prepare_enable(msm_host->bus_clk);
if (ret)
- goto pltfm_free;
+ return ret;
}
/* Setup main peripheral bus clock */
@@ -2572,11 +2643,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
clk = NULL;
msm_host->bulk_clks[3].clk = clk;
- clk = sdhci_msm_ice_get_clk(&pdev->dev);
- if (IS_ERR(clk))
- clk = NULL;
- msm_host->bulk_clks[4].clk = clk;
-
ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
msm_host->bulk_clks);
if (ret)
@@ -2669,7 +2735,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
sdhci_msm_handle_pwr_irq(host, 0);
/*
- * Ensure that above writes are propogated before interrupt enablement
+ * Ensure that above writes are propagated before interrupt enablement
* in GIC.
*/
mb();
@@ -2696,6 +2762,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
+ /* Set the timeout value to max possible */
+ host->max_timeout_count = 0xF;
+
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -2713,7 +2782,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
if (ret)
goto pm_runtime_disable;
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
@@ -2728,12 +2796,10 @@ clk_disable:
bus_clk_disable:
if (!IS_ERR(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
-pltfm_free:
- sdhci_pltfm_free(pdev);
return ret;
}
-static int sdhci_msm_remove(struct platform_device *pdev)
+static void sdhci_msm_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -2751,29 +2817,33 @@ static int sdhci_msm_remove(struct platform_device *pdev)
msm_host->bulk_clks);
if (!IS_ERR(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
- sdhci_pltfm_free(pdev);
- return 0;
}
-static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
+static int sdhci_msm_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = true;
+ spin_unlock_irqrestore(&host->lock, flags);
/* Drop the performance vote */
dev_pm_opp_set_rate(dev, 0);
clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
msm_host->bulk_clks);
- return 0;
+ return sdhci_msm_ice_suspend(msm_host);
}
-static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
+static int sdhci_msm_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long flags;
int ret;
ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
@@ -2792,15 +2862,20 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
dev_pm_opp_set_rate(dev, msm_host->clk_rate);
- return sdhci_msm_ice_resume(msm_host);
+ ret = sdhci_msm_ice_resume(msm_host);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = false;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ret;
}
static const struct dev_pm_ops sdhci_msm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
- sdhci_msm_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume, NULL)
};
static struct platform_driver sdhci_msm_driver = {
@@ -2809,7 +2884,7 @@ static struct platform_driver sdhci_msm_driver = {
.driver = {
.name = "sdhci_msm",
.of_match_table = sdhci_msm_dt_match,
- .pm = &sdhci_msm_pm_ops,
+ .pm = pm_ptr(&sdhci_msm_pm_ops),
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
diff --git a/drivers/mmc/host/sdhci-npcm.c b/drivers/mmc/host/sdhci-npcm.c
new file mode 100644
index 000000000000..71b635dfdf1d
--- /dev/null
+++ b/drivers/mmc/host/sdhci-npcm.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NPCM SDHC MMC host controller driver.
+ *
+ * Copyright (c) 2023 Nuvoton Technology corporation.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include "sdhci-pltfm.h"
+
+static const struct sdhci_pltfm_data npcm7xx_sdhci_pdata = {
+ .quirks = SDHCI_QUIRK_DELAY_AFTER_POWER,
+ .quirks2 = SDHCI_QUIRK2_STOP_WITH_TC |
+ SDHCI_QUIRK2_NO_1_8_V,
+};
+
+static const struct sdhci_pltfm_data npcm8xx_sdhci_pdata = {
+ .quirks = SDHCI_QUIRK_DELAY_AFTER_POWER,
+ .quirks2 = SDHCI_QUIRK2_STOP_WITH_TC,
+};
+
+static int npcm_sdhci_probe(struct platform_device *pdev)
+{
+ const struct sdhci_pltfm_data *data;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host;
+ u32 caps;
+ int ret;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ host = sdhci_pltfm_init(pdev, data, 0);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+
+ pltfm_host->clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(pltfm_host->clk))
+ return PTR_ERR(pltfm_host->clk);
+
+ caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ if (caps & SDHCI_CAN_DO_8BIT)
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ return ret;
+
+ return sdhci_add_host(host);
+}
+
+static const struct of_device_id npcm_sdhci_of_match[] = {
+ { .compatible = "nuvoton,npcm750-sdhci", .data = &npcm7xx_sdhci_pdata },
+ { .compatible = "nuvoton,npcm845-sdhci", .data = &npcm8xx_sdhci_pdata },
+ { }
+};
+MODULE_DEVICE_TABLE(of, npcm_sdhci_of_match);
+
+static struct platform_driver npcm_sdhci_driver = {
+ .driver = {
+ .name = "npcm-sdhci",
+ .of_match_table = npcm_sdhci_of_match,
+ .pm = &sdhci_pltfm_pmops,
+ },
+ .probe = npcm_sdhci_probe,
+ .remove = sdhci_pltfm_remove,
+};
+module_platform_driver(npcm_sdhci_driver);
+
+MODULE_DESCRIPTION("NPCM Secure Digital Host Controller Interface driver");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 0e7c07ed9690..b97d042897ad 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -18,13 +18,15 @@
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
-#include <linux/of.h>
+#include <linux/reset.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include "cqhci.h"
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
@@ -39,11 +41,43 @@
#define VENDOR_ENHANCED_STROBE BIT(0)
#define PHY_CLK_TOO_SLOW_HZ 400000
+#define MIN_PHY_CLK_HZ 50000000
#define SDHCI_ITAPDLY_CHGWIN 0x200
#define SDHCI_ITAPDLY_ENABLE 0x100
#define SDHCI_OTAPDLY_ENABLE 0x40
+#define PHY_CTRL_REG1 0x270
+#define PHY_CTRL_ITAPDLY_ENA_MASK BIT(0)
+#define PHY_CTRL_ITAPDLY_SEL_MASK GENMASK(5, 1)
+#define PHY_CTRL_ITAPDLY_SEL_SHIFT 1
+#define PHY_CTRL_ITAP_CHG_WIN_MASK BIT(6)
+#define PHY_CTRL_OTAPDLY_ENA_MASK BIT(8)
+#define PHY_CTRL_OTAPDLY_SEL_MASK GENMASK(15, 12)
+#define PHY_CTRL_OTAPDLY_SEL_SHIFT 12
+#define PHY_CTRL_STRB_SEL_MASK GENMASK(23, 16)
+#define PHY_CTRL_STRB_SEL_SHIFT 16
+#define PHY_CTRL_TEST_CTRL_MASK GENMASK(31, 24)
+
+#define PHY_CTRL_REG2 0x274
+#define PHY_CTRL_EN_DLL_MASK BIT(0)
+#define PHY_CTRL_DLL_RDY_MASK BIT(1)
+#define PHY_CTRL_FREQ_SEL_MASK GENMASK(6, 4)
+#define PHY_CTRL_FREQ_SEL_SHIFT 4
+#define PHY_CTRL_SEL_DLY_TX_MASK BIT(16)
+#define PHY_CTRL_SEL_DLY_RX_MASK BIT(17)
+#define FREQSEL_200M_170M 0x0
+#define FREQSEL_170M_140M 0x1
+#define FREQSEL_140M_110M 0x2
+#define FREQSEL_110M_80M 0x3
+#define FREQSEL_80M_50M 0x4
+#define FREQSEL_275M_250M 0x5
+#define FREQSEL_250M_225M 0x6
+#define FREQSEL_225M_200M 0x7
+#define PHY_DLL_TIMEOUT_MS 100
+
+#define SDHCI_HW_RST_EN BIT(4)
+
/* Default settings for ZynqMP Clock Phases */
#define ZYNQMP_ICLK_PHASE {0, 63, 63, 0, 63, 0, 0, 183, 54, 0, 0}
#define ZYNQMP_OCLK_PHASE {0, 72, 60, 0, 60, 72, 135, 48, 72, 135, 0}
@@ -51,6 +85,11 @@
#define VERSAL_ICLK_PHASE {0, 132, 132, 0, 132, 0, 0, 162, 90, 0, 0}
#define VERSAL_OCLK_PHASE {0, 60, 48, 0, 48, 72, 90, 36, 60, 90, 0}
+#define VERSAL_NET_EMMC_ICLK_PHASE {0, 0, 0, 0, 0, 0, 0, 0, 39, 0, 0}
+#define VERSAL_NET_EMMC_OCLK_PHASE {0, 113, 0, 0, 0, 0, 0, 0, 113, 79, 45}
+
+#define VERSAL_NET_PHY_CTRL_STRB90_STRB180_VAL 0x77
+
/*
* On some SoCs the syscon area has a feature where the upper 16-bits of
* each 32-bit register act as a write mask for the lower 16-bits. This allows
@@ -60,6 +99,9 @@
#define HIWORD_UPDATE(val, mask, shift) \
((val) << (shift) | (mask) << ((shift) + 16))
+#define CD_STABLE_TIMEOUT_US 1000000
+#define CD_STABLE_MAX_SLEEP_US 10
+
/**
* struct sdhci_arasan_soc_ctl_field - Field used in sdhci_arasan_soc_ctl_map
*
@@ -133,6 +175,7 @@ struct sdhci_arasan_clk_data {
* @clk_ahb: Pointer to the AHB clock
* @phy: Pointer to the generic phy
* @is_phy_on: True if the PHY is on; false if not.
+ * @internal_phy_reg: True if the PHY is within the Host controller.
* @has_cqe: True if controller has command queuing engine.
* @clk_data: Struct for the Arasan Controller Clock Data.
* @clk_ops: Struct for the Arasan Controller Clock Operations.
@@ -145,6 +188,7 @@ struct sdhci_arasan_data {
struct clk *clk_ahb;
struct phy *phy;
bool is_phy_on;
+ bool internal_phy_reg;
bool has_cqe;
struct sdhci_arasan_clk_data clk_data;
@@ -159,12 +203,21 @@ struct sdhci_arasan_data {
/* Controller immediately reports SDHCI_CLOCK_INT_STABLE after enabling the
* internal clock even when the clock isn't stable */
#define SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE BIT(1)
+/*
+ * Some of the Arasan variations might not have timing requirements
+ * met at 25MHz for Default Speed mode; those controllers work at
+ * 19MHz instead.
+ */
+#define SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN BIT(2)
+/* Enable CD stable check before power-up */
+#define SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE BIT(3)
};
struct sdhci_arasan_of_data {
const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
const struct sdhci_pltfm_data *pdata;
const struct sdhci_arasan_clk_ops *clk_ops;
+ u32 quirks;
};
static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = {
@@ -192,6 +245,61 @@ static const struct sdhci_arasan_soc_ctl_map intel_keembay_soc_ctl_map = {
.hiword_update = false,
};
+static void sdhci_arasan_phy_set_delaychain(struct sdhci_host *host, bool enable)
+{
+ u32 reg;
+
+ reg = readl(host->ioaddr + PHY_CTRL_REG2);
+ if (enable)
+ reg |= (PHY_CTRL_SEL_DLY_TX_MASK | PHY_CTRL_SEL_DLY_RX_MASK);
+ else
+ reg &= ~(PHY_CTRL_SEL_DLY_TX_MASK | PHY_CTRL_SEL_DLY_RX_MASK);
+
+ writel(reg, host->ioaddr + PHY_CTRL_REG2);
+}
+
+static int sdhci_arasan_phy_set_dll(struct sdhci_host *host, bool enable)
+{
+ u32 reg;
+
+ reg = readl(host->ioaddr + PHY_CTRL_REG2);
+ if (enable)
+ reg |= PHY_CTRL_EN_DLL_MASK;
+ else
+ reg &= ~PHY_CTRL_EN_DLL_MASK;
+
+ writel(reg, host->ioaddr + PHY_CTRL_REG2);
+
+ if (!enable)
+ return 0;
+
+ return readl_relaxed_poll_timeout(host->ioaddr + PHY_CTRL_REG2, reg,
+ (reg & PHY_CTRL_DLL_RDY_MASK), 10,
+ 1000 * PHY_DLL_TIMEOUT_MS);
+}
+
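A brief note on the polling call in sdhci_arasan_phy_set_dll() above: readl_relaxed_poll_timeout() takes its last two arguments in microseconds, so the numbers restate as follows (taken straight from the defines above):

    /* Poll PHY_CTRL_REG2 every 10 us until PHY_CTRL_DLL_RDY_MASK is set,
     * giving up after 1000 * PHY_DLL_TIMEOUT_MS = 100000 us (100 ms) and
     * returning -ETIMEDOUT in that case. */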
+static void sdhci_arasan_phy_dll_set_freq(struct sdhci_host *host, int clock)
+{
+ u32 reg, freq_sel, freq;
+
+ freq = DIV_ROUND_CLOSEST(clock, 1000000);
+ if (freq <= 200 && freq > 170)
+ freq_sel = FREQSEL_200M_170M;
+ else if (freq <= 170 && freq > 140)
+ freq_sel = FREQSEL_170M_140M;
+ else if (freq <= 140 && freq > 110)
+ freq_sel = FREQSEL_140M_110M;
+ else if (freq <= 110 && freq > 80)
+ freq_sel = FREQSEL_110M_80M;
+ else
+ freq_sel = FREQSEL_80M_50M;
+
+ reg = readl(host->ioaddr + PHY_CTRL_REG2);
+ reg &= ~PHY_CTRL_FREQ_SEL_MASK;
+ reg |= (freq_sel << PHY_CTRL_FREQ_SEL_SHIFT);
+ writel(reg, host->ioaddr + PHY_CTRL_REG2);
+}
+
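For reference, the frequency-band selection in sdhci_arasan_phy_dll_set_freq() resolves as shown below; the example rates are illustrative only:

    /* freq = DIV_ROUND_CLOSEST(clock, 1000000):
     *   200000000 Hz -> freq = 200 -> FREQSEL_200M_170M
     *   150000000 Hz -> freq = 150 -> FREQSEL_170M_140M
     *    52000000 Hz -> freq =  52 -> falls through to FREQSEL_80M_50M */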
/**
* sdhci_arasan_syscon_write - Write to a field in soc_ctl registers
*
@@ -267,7 +375,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
* through low speeds without power cycling.
*/
sdhci_set_clock(host, host->max_clk);
- phy_power_on(sdhci_arasan->phy);
+ if (phy_power_on(sdhci_arasan->phy)) {
+ pr_err("%s: Cannot power on phy.\n",
+ mmc_hostname(host->mmc));
+ return;
+ }
+
sdhci_arasan->is_phy_on = true;
/*
@@ -290,12 +403,35 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_arasan->is_phy_on = false;
}
+ if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN) {
+ /*
+ * Some of the Arasan variations might not have timing
+ * requirements met at 25MHz for Default Speed mode;
+ * those controllers work at 19MHz instead.
+ */
+ if (clock == DEFAULT_SPEED_MAX_DTR)
+ clock = (DEFAULT_SPEED_MAX_DTR * 19) / 25;
+ }
+
/* Set the Input and Output Clock Phase Delays */
- if (clk_data->set_clk_delays)
+ if (clk_data->set_clk_delays && clock > PHY_CLK_TOO_SLOW_HZ)
clk_data->set_clk_delays(host);
+ if (sdhci_arasan->internal_phy_reg && clock >= MIN_PHY_CLK_HZ) {
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ sdhci_arasan_phy_set_dll(host, 0);
+ sdhci_arasan_phy_set_delaychain(host, 0);
+ sdhci_arasan_phy_dll_set_freq(host, clock);
+ } else if (sdhci_arasan->internal_phy_reg) {
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ sdhci_arasan_phy_set_delaychain(host, 1);
+ }
+
sdhci_set_clock(host, clock);
+ if (sdhci_arasan->internal_phy_reg && clock >= MIN_PHY_CLK_HZ)
+ sdhci_arasan_phy_set_dll(host, 1);
+
if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE)
/*
* Some controllers immediately report SDHCI_CLOCK_INT_STABLE
@@ -307,7 +443,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
msleep(20);
if (ctrl_phy) {
- phy_power_on(sdhci_arasan->phy);
+ if (phy_power_on(sdhci_arasan->phy)) {
+ pr_err("%s: Cannot power on phy.\n",
+ mmc_hostname(host->mmc));
+ return;
+ }
+
sdhci_arasan->is_phy_on = true;
}
}
@@ -333,7 +474,7 @@ static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
- sdhci_reset(host, mask);
+ sdhci_and_cqhci_reset(host, mask);
if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_FORCE_CDTEST) {
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
@@ -342,6 +483,21 @@ static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
}
}
+static void sdhci_arasan_hw_reset(struct sdhci_host *host)
+{
+ u8 reg;
+
+ reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ reg |= SDHCI_HW_RST_EN;
+ sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
+ /* As per the eMMC spec, a minimum of 1us is required; give it 2us for good measure */
+ usleep_range(2, 5);
+ reg &= ~SDHCI_HW_RST_EN;
+ sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
+ /* As per the eMMC spec, a minimum of 200us is required; give it 300us for good measure */
+ usleep_range(300, 500);
+}
+
static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios)
{
@@ -364,6 +520,24 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
return -EINVAL;
}
+static void sdhci_arasan_set_power_and_bus_voltage(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ /*
+ * Ensure that the card detect logic has stabilized before powering up; this is
+ * necessary after a host controller reset.
+ */
+ if (mode == MMC_POWER_UP && sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE)
+ read_poll_timeout(sdhci_readl, reg, reg & SDHCI_CD_STABLE, CD_STABLE_MAX_SLEEP_US,
+ CD_STABLE_TIMEOUT_US, false, host, SDHCI_PRESENT_STATE);
+
+ sdhci_set_power_and_bus_voltage(host, mode, vdd);
+}
+
static const struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -371,7 +545,8 @@ static const struct sdhci_ops sdhci_arasan_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_arasan_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
- .set_power = sdhci_set_power_and_bus_voltage,
+ .set_power = sdhci_arasan_set_power_and_bus_voltage,
+ .hw_reset = sdhci_arasan_hw_reset,
};
static u32 sdhci_arasan_cqhci_irq(struct sdhci_host *host, u32 intmask)
@@ -419,7 +594,7 @@ static const struct sdhci_ops sdhci_arasan_cqe_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_arasan_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
- .set_power = sdhci_set_power_and_bus_voltage,
+ .set_power = sdhci_arasan_set_power_and_bus_voltage,
.irq = sdhci_arasan_cqhci_irq,
};
@@ -430,7 +605,6 @@ static const struct sdhci_pltfm_data sdhci_arasan_cqe_pdata = {
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
-#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
* @dev: Address of the device structure
@@ -463,7 +637,9 @@ static int sdhci_arasan_suspend(struct device *dev)
ret = phy_power_off(sdhci_arasan->phy);
if (ret) {
dev_err(dev, "Cannot power off phy.\n");
- sdhci_resume_host(host);
+ if (sdhci_resume_host(host))
+ dev_err(dev, "Cannot resume host.\n");
+
return ret;
}
sdhci_arasan->is_phy_on = false;
@@ -522,10 +698,9 @@ static int sdhci_arasan_resume(struct device *dev)
return 0;
}
-#endif /* ! CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend,
- sdhci_arasan_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend,
+ sdhci_arasan_resume);
/**
* sdhci_arasan_sdcardclk_recalc_rate - Return the card clock rate
@@ -851,6 +1026,101 @@ static const struct clk_ops versal_sampleclk_ops = {
.set_phase = sdhci_versal_sampleclk_set_phase,
};
+static int sdhci_versal_net_emmc_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw);
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
+ struct sdhci_host *host = sdhci_arasan->host;
+ u8 tap_delay, tap_max = 0;
+
+ switch (host->timing) {
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_MMC_DDR52:
+ tap_max = 16;
+ break;
+ case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_MMC_HS400:
+ /* For 200MHz clock, 32 Taps are available */
+ tap_max = 32;
+ break;
+ default:
+ break;
+ }
+
+ tap_delay = (degrees * tap_max) / 360;
+
+ /* Set the Clock Phase */
+ if (tap_delay) {
+ u32 regval;
+
+ regval = sdhci_readl(host, PHY_CTRL_REG1);
+ regval |= PHY_CTRL_OTAPDLY_ENA_MASK;
+ sdhci_writel(host, regval, PHY_CTRL_REG1);
+ regval &= ~PHY_CTRL_OTAPDLY_SEL_MASK;
+ regval |= tap_delay << PHY_CTRL_OTAPDLY_SEL_SHIFT;
+ sdhci_writel(host, regval, PHY_CTRL_REG1);
+ }
+
+ return 0;
+}
+
+static const struct clk_ops versal_net_sdcardclk_ops = {
+ .recalc_rate = sdhci_arasan_sdcardclk_recalc_rate,
+ .set_phase = sdhci_versal_net_emmc_sdcardclk_set_phase,
+};
+
+static int sdhci_versal_net_emmc_sampleclk_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw);
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
+ struct sdhci_host *host = sdhci_arasan->host;
+ u8 tap_delay, tap_max = 0;
+ u32 regval;
+
+ switch (host->timing) {
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_MMC_DDR52:
+ tap_max = 32;
+ break;
+ case MMC_TIMING_MMC_HS400:
+ /* Strobe select tap point for strb90 and strb180 */
+ regval = sdhci_readl(host, PHY_CTRL_REG1);
+ regval &= ~PHY_CTRL_STRB_SEL_MASK;
+ regval |= VERSAL_NET_PHY_CTRL_STRB90_STRB180_VAL << PHY_CTRL_STRB_SEL_SHIFT;
+ sdhci_writel(host, regval, PHY_CTRL_REG1);
+ break;
+ default:
+ break;
+ }
+
+ tap_delay = (degrees * tap_max) / 360;
+
+ /* Set the Clock Phase */
+ if (tap_delay) {
+ regval = sdhci_readl(host, PHY_CTRL_REG1);
+ regval |= PHY_CTRL_ITAP_CHG_WIN_MASK;
+ sdhci_writel(host, regval, PHY_CTRL_REG1);
+ regval |= PHY_CTRL_ITAPDLY_ENA_MASK;
+ sdhci_writel(host, regval, PHY_CTRL_REG1);
+ regval &= ~PHY_CTRL_ITAPDLY_SEL_MASK;
+ regval |= tap_delay << PHY_CTRL_ITAPDLY_SEL_SHIFT;
+ sdhci_writel(host, regval, PHY_CTRL_REG1);
+ regval &= ~PHY_CTRL_ITAP_CHG_WIN_MASK;
+ sdhci_writel(host, regval, PHY_CTRL_REG1);
+ }
+
+ return 0;
+}
+
+static const struct clk_ops versal_net_sampleclk_ops = {
+ .recalc_rate = sdhci_arasan_sampleclk_recalc_rate,
+ .set_phase = sdhci_versal_net_emmc_sampleclk_set_phase,
+};
+
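To make the phase-to-tap conversion in the two Versal NET set_phase callbacks above concrete, a couple of worked values (degrees picked for illustration):

    /* tap_delay = (degrees * tap_max) / 360:
     *   output clock, HS200/HS400, tap_max = 32:  90 degrees -> (90 * 32) / 360 = 8 taps
     *   output clock, HS/DDR52,    tap_max = 16:  90 degrees -> (90 * 16) / 360 = 4 taps
     * A result of 0 skips the register update, leaving the delay disabled. */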
static void arasan_zynqmp_dll_reset(struct sdhci_host *host, u32 deviceid)
{
u16 clk;
@@ -878,6 +1148,10 @@ static int arasan_zynqmp_execute_tuning(struct mmc_host *mmc, u32 opcode)
NODE_SD_1;
int err;
+ /* ZynqMP SD controller does not perform auto tuning in DDR50 mode */
+ if (mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+ return 0;
+
arasan_zynqmp_dll_reset(host, device_id);
err = sdhci_execute_tuning(mmc, opcode);
@@ -952,7 +1226,7 @@ static void sdhci_arasan_update_baseclkfreq(struct sdhci_host *host)
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
const struct sdhci_arasan_soc_ctl_map *soc_ctl_map =
sdhci_arasan->soc_ctl_map;
- u32 mhz = DIV_ROUND_CLOSEST(clk_get_rate(pltfm_host->clk), 1000000);
+ u32 mhz = DIV_ROUND_CLOSEST_ULL(clk_get_rate(pltfm_host->clk), 1000000);
/* Having a map is optional */
if (!soc_ctl_map)
@@ -986,14 +1260,16 @@ static void arasan_dt_read_clk_phase(struct device *dev,
{
struct device_node *np = dev->of_node;
- int clk_phase[2] = {0};
+ u32 clk_phase[2] = {0};
+ int ret;
/*
* Read Tap Delay values from DT, if the DT does not contain the
* Tap Values then use the pre-defined values.
*/
- if (of_property_read_variable_u32_array(np, prop, &clk_phase[0],
- 2, 0)) {
+ ret = of_property_read_variable_u32_array(np, prop, &clk_phase[0],
+ 2, 0);
+ if (ret < 0) {
dev_dbg(dev, "Using predefined clock phase for %s = %d %d\n",
prop, clk_data->clk_phase_in[timing],
clk_data->clk_phase_out[timing]);
@@ -1055,7 +1331,17 @@ static void arasan_dt_parse_clk_phases(struct device *dev,
clk_data->clk_phase_out[i] = versal_oclk_phase[i];
}
}
+ if (of_device_is_compatible(dev->of_node, "xlnx,versal-net-emmc")) {
+ u32 versal_net_iclk_phase[MMC_TIMING_MMC_HS400 + 1] =
+ VERSAL_NET_EMMC_ICLK_PHASE;
+ u32 versal_net_oclk_phase[MMC_TIMING_MMC_HS400 + 1] =
+ VERSAL_NET_EMMC_OCLK_PHASE;
+ for (i = 0; i <= MMC_TIMING_MMC_HS400; i++) {
+ clk_data->clk_phase_in[i] = versal_net_iclk_phase[i];
+ clk_data->clk_phase_out[i] = versal_net_oclk_phase[i];
+ }
+ }
arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_LEGACY,
"clk-phase-legacy");
arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS,
@@ -1167,6 +1453,14 @@ static const struct sdhci_pltfm_data sdhci_arasan_zynqmp_pdata = {
SDHCI_QUIRK2_STOP_WITH_TC,
};
+static const struct sdhci_pltfm_data sdhci_arasan_versal_net_pdata = {
+ .ops = &sdhci_arasan_ops,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN |
+ SDHCI_QUIRK2_STOP_WITH_TC |
+ SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
+};
+
static const struct sdhci_arasan_clk_ops zynqmp_clk_ops = {
.sdcardclk_ops = &zynqmp_sdcardclk_ops,
.sampleclk_ops = &zynqmp_sampleclk_ops,
@@ -1175,6 +1469,7 @@ static const struct sdhci_arasan_clk_ops zynqmp_clk_ops = {
static struct sdhci_arasan_of_data sdhci_arasan_zynqmp_data = {
.pdata = &sdhci_arasan_zynqmp_pdata,
.clk_ops = &zynqmp_clk_ops,
+ .quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE,
};
static const struct sdhci_arasan_clk_ops versal_clk_ops = {
@@ -1185,6 +1480,18 @@ static const struct sdhci_arasan_clk_ops versal_clk_ops = {
static struct sdhci_arasan_of_data sdhci_arasan_versal_data = {
.pdata = &sdhci_arasan_zynqmp_pdata,
.clk_ops = &versal_clk_ops,
+ .quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE,
+};
+
+static const struct sdhci_arasan_clk_ops versal_net_clk_ops = {
+ .sdcardclk_ops = &versal_net_sdcardclk_ops,
+ .sampleclk_ops = &versal_net_sampleclk_ops,
+};
+
+static struct sdhci_arasan_of_data sdhci_arasan_versal_net_data = {
+ .pdata = &sdhci_arasan_versal_net_pdata,
+ .clk_ops = &versal_net_clk_ops,
+ .quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE,
};
static struct sdhci_arasan_of_data intel_keembay_emmc_data = {
@@ -1252,6 +1559,10 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
.compatible = "xlnx,versal-8.9a",
.data = &sdhci_arasan_versal_data,
},
+ {
+ .compatible = "xlnx,versal-net-emmc",
+ .data = &sdhci_arasan_versal_net_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
@@ -1372,7 +1683,7 @@ static void sdhci_arasan_unregister_sdclk(struct device *dev)
{
struct device_node *np = dev->of_node;
- if (!of_find_property(np, "#clock-cells", NULL))
+ if (!of_property_present(np, "#clock-cells"))
return;
of_clk_del_provider(dev->of_node);
@@ -1461,6 +1772,65 @@ static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
return 0;
}
+static int sdhci_zynqmp_set_dynamic_config(struct device *dev,
+ struct sdhci_arasan_data *sdhci_arasan)
+{
+ struct sdhci_host *host = sdhci_arasan->host;
+ struct clk_hw *hw = &sdhci_arasan->clk_data.sdcardclk_hw;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ const char *clk_name = clk_hw_get_name(hw);
+ u32 mhz, node_id = !strcmp(clk_name, "clk_out_sd0") ? NODE_SD_0 : NODE_SD_1;
+ struct reset_control *rstc;
+ int ret;
+
+ /* Obtain SDHC reset control */
+ rstc = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(rstc)) {
+ dev_err(dev, "Cannot get SDHC reset.\n");
+ return PTR_ERR(rstc);
+ }
+
+ ret = reset_control_assert(rstc);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_set_sd_config(node_id, SD_CONFIG_FIXED, 0);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_set_sd_config(node_id, SD_CONFIG_EMMC_SEL,
+ !!(host->mmc->caps & MMC_CAP_NONREMOVABLE));
+ if (ret)
+ return ret;
+
+ mhz = DIV_ROUND_CLOSEST_ULL(clk_get_rate(pltfm_host->clk), 1000000);
+ if (mhz > 100 && mhz <= 200)
+ mhz = 200;
+ else if (mhz > 50 && mhz <= 100)
+ mhz = 100;
+ else if (mhz > 25 && mhz <= 50)
+ mhz = 50;
+ else
+ mhz = 25;
+
+ ret = zynqmp_pm_set_sd_config(node_id, SD_CONFIG_BASECLK, mhz);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_set_sd_config(node_id, SD_CONFIG_8BIT,
+ !!(host->mmc->caps & MMC_CAP_8_BIT_DATA));
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(rstc);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 1500);
+
+ return 0;
+}
+
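The base-clock value handed to the firmware in sdhci_zynqmp_set_dynamic_config() is quantized; a few illustrative inputs (not from the patch):

    /* mhz = DIV_ROUND_CLOSEST_ULL(clk_get_rate(...), 1000000), then bucketed:
     *   187 MHz -> in (100, 200] -> SD_CONFIG_BASECLK set to 200
     *    49 MHz -> in (25, 50]   -> SD_CONFIG_BASECLK set to 50
     *    20 MHz -> below 25      -> SD_CONFIG_BASECLK set to 25 */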
static int sdhci_arasan_add_host(struct sdhci_arasan_data *sdhci_arasan)
{
struct sdhci_host *host = sdhci_arasan->host;
@@ -1509,6 +1879,7 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
int ret;
struct device_node *node;
struct clk *clk_xin;
+ struct clk *clk_dll;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct device *dev = &pdev->dev;
@@ -1517,6 +1888,9 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
const struct sdhci_arasan_of_data *data;
data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
host = sdhci_pltfm_init(pdev, data->pdata, sizeof(*sdhci_arasan));
if (IS_ERR(host))
@@ -1534,34 +1908,26 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
sdhci_arasan->soc_ctl_base = syscon_node_to_regmap(node);
of_node_put(node);
- if (IS_ERR(sdhci_arasan->soc_ctl_base)) {
- ret = dev_err_probe(dev,
+ if (IS_ERR(sdhci_arasan->soc_ctl_base))
+ return dev_err_probe(dev,
PTR_ERR(sdhci_arasan->soc_ctl_base),
"Can't get syscon\n");
- goto err_pltfm_free;
- }
}
sdhci_get_of_property(pdev);
sdhci_arasan->clk_ahb = devm_clk_get(dev, "clk_ahb");
- if (IS_ERR(sdhci_arasan->clk_ahb)) {
- ret = dev_err_probe(dev, PTR_ERR(sdhci_arasan->clk_ahb),
+ if (IS_ERR(sdhci_arasan->clk_ahb))
+ return dev_err_probe(dev, PTR_ERR(sdhci_arasan->clk_ahb),
"clk_ahb clock not found.\n");
- goto err_pltfm_free;
- }
clk_xin = devm_clk_get(dev, "clk_xin");
- if (IS_ERR(clk_xin)) {
- ret = dev_err_probe(dev, PTR_ERR(clk_xin), "clk_xin clock not found.\n");
- goto err_pltfm_free;
- }
+ if (IS_ERR(clk_xin))
+ return dev_err_probe(dev, PTR_ERR(clk_xin), "clk_xin clock not found.\n");
ret = clk_prepare_enable(sdhci_arasan->clk_ahb);
- if (ret) {
- dev_err(dev, "Unable to enable AHB clock.\n");
- goto err_pltfm_free;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Unable to enable AHB clock.\n");
/* If clock-frequency property is set, use the provided value */
if (pltfm_host->clock &&
@@ -1579,6 +1945,12 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
goto clk_dis_ahb;
}
+ clk_dll = devm_clk_get_optional_enabled(dev, "gate");
+ if (IS_ERR(clk_dll)) {
+ ret = dev_err_probe(dev, PTR_ERR(clk_dll), "failed to get dll clk\n");
+ goto clk_disable_all;
+ }
+
if (of_property_read_bool(np, "xlnx,fails-without-test-cd"))
sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_FORCE_CDTEST;
@@ -1590,6 +1962,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "rockchip,rk3399-sdhci-5.1"))
sdhci_arasan_update_clockmultiplier(host, 0x0);
+ sdhci_arasan->quirks |= data->quirks;
+
if (of_device_is_compatible(np, "intel,keembay-sdhci-5.1-emmc") ||
of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sd") ||
of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sdio")) {
@@ -1608,16 +1982,28 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a")) {
host->mmc_host_ops.execute_tuning =
arasan_zynqmp_execute_tuning;
+
+ sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN;
+ host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
}
arasan_dt_parse_clk_phases(dev, &sdhci_arasan->clk_data);
ret = mmc_of_parse(host->mmc);
if (ret) {
- ret = dev_err_probe(dev, ret, "parsing dt failed.\n");
+ dev_err_probe(dev, ret, "parsing dt failed.\n");
goto unreg_clk;
}
+ if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a")) {
+ ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_SD_CONFIG);
+ if (!ret) {
+ ret = sdhci_zynqmp_set_dynamic_config(dev, sdhci_arasan);
+ if (ret)
+ goto unreg_clk;
+ }
+ }
+
sdhci_arasan->phy = ERR_PTR(-ENODEV);
if (of_device_is_compatible(np, "arasan,sdhci-5.1")) {
sdhci_arasan->phy = devm_phy_get(dev, "phy_arasan");
@@ -1644,6 +2030,9 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
}
+ if (of_device_is_compatible(np, "xlnx,versal-net-emmc"))
+ sdhci_arasan->internal_phy_reg = true;
+
ret = sdhci_arasan_add_host(sdhci_arasan);
if (ret)
goto err_add_host;
@@ -1659,18 +2048,16 @@ clk_disable_all:
clk_disable_unprepare(clk_xin);
clk_dis_ahb:
clk_disable_unprepare(sdhci_arasan->clk_ahb);
-err_pltfm_free:
- sdhci_pltfm_free(pdev);
return ret;
}
-static int sdhci_arasan_remove(struct platform_device *pdev)
+static void sdhci_arasan_remove(struct platform_device *pdev)
{
- int ret;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
struct clk *clk_ahb = sdhci_arasan->clk_ahb;
+ struct clk *clk_xin = pltfm_host->clk;
if (!IS_ERR(sdhci_arasan->phy)) {
if (sdhci_arasan->is_phy_on)
@@ -1680,11 +2067,10 @@ static int sdhci_arasan_remove(struct platform_device *pdev)
sdhci_arasan_unregister_sdclk(&pdev->dev);
- ret = sdhci_pltfm_unregister(pdev);
+ sdhci_pltfm_remove(pdev);
+ clk_disable_unprepare(clk_xin);
clk_disable_unprepare(clk_ahb);
-
- return ret;
}
static struct platform_driver sdhci_arasan_driver = {
@@ -1692,7 +2078,7 @@ static struct platform_driver sdhci_arasan_driver = {
.name = "sdhci-arasan",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_arasan_of_match,
- .pm = &sdhci_arasan_dev_pm_ops,
+ .pm = pm_sleep_ptr(&sdhci_arasan_dev_pm_ops),
},
.probe = sdhci_arasan_probe,
.remove = sdhci_arasan_remove,
diff --git a/drivers/mmc/host/sdhci-of-aspeed-test.c b/drivers/mmc/host/sdhci-of-aspeed-test.c
index 1ed4f86291f2..ecb502606c53 100644
--- a/drivers/mmc/host/sdhci-of-aspeed-test.c
+++ b/drivers/mmc/host/sdhci-of-aspeed-test.c
@@ -96,10 +96,4 @@ static struct kunit_suite aspeed_sdhci_test_suite = {
.test_cases = aspeed_sdhci_test_cases,
};
-static struct kunit_suite *aspeed_sdc_test_suite_array[] = {
- &aspeed_sdhci_test_suite,
- NULL,
-};
-
-static struct kunit_suite **aspeed_sdc_test_suites
- __used __section(".kunit_test_suites") = aspeed_sdc_test_suite_array;
+kunit_test_suite(aspeed_sdhci_test_suite);
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index 6e4e132903a6..ca97b01996b1 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -425,17 +425,15 @@ static int aspeed_sdhci_probe(struct platform_device *pdev)
return PTR_ERR(pltfm_host->clk);
ret = clk_prepare_enable(pltfm_host->clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable SDIO clock\n");
- goto err_pltfm_free;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Unable to enable SDIO clock\n");
ret = mmc_of_parse(host->mmc);
if (ret)
goto err_sdhci_add;
if (dev->phase_desc)
- mmc_of_parse_clk_phase(host->mmc, &dev->phase_map);
+ mmc_of_parse_clk_phase(&pdev->dev, &dev->phase_map);
ret = sdhci_add_host(host);
if (ret)
@@ -445,27 +443,20 @@ static int aspeed_sdhci_probe(struct platform_device *pdev)
err_sdhci_add:
clk_disable_unprepare(pltfm_host->clk);
-err_pltfm_free:
- sdhci_pltfm_free(pdev);
return ret;
}
-static int aspeed_sdhci_remove(struct platform_device *pdev)
+static void aspeed_sdhci_remove(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
- int dead = 0;
host = platform_get_drvdata(pdev);
pltfm_host = sdhci_priv(host);
- sdhci_remove_host(host, dead);
+ sdhci_remove_host(host, 0);
clk_disable_unprepare(pltfm_host->clk);
-
- sdhci_pltfm_free(pdev);
-
- return 0;
}
static const struct aspeed_sdhci_pdata ast2400_sdhci_pdata = {
@@ -513,6 +504,7 @@ static const struct of_device_id aspeed_sdhci_of_match[] = {
{ .compatible = "aspeed,ast2600-sdhci", .data = &ast2600_sdhci_pdata, },
{ }
};
+MODULE_DEVICE_TABLE(of, aspeed_sdhci_of_match);
static struct platform_driver aspeed_sdhci_driver = {
.driver = {
@@ -547,8 +539,7 @@ static int aspeed_sdc_probe(struct platform_device *pdev)
return ret;
}
- sdc->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdc->regs = devm_ioremap_resource(&pdev->dev, sdc->res);
+ sdc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &sdc->res);
if (IS_ERR(sdc->regs)) {
ret = PTR_ERR(sdc->regs);
goto err_clk;
@@ -575,13 +566,11 @@ err_clk:
return ret;
}
-static int aspeed_sdc_remove(struct platform_device *pdev)
+static void aspeed_sdc_remove(struct platform_device *pdev)
{
struct aspeed_sdc *sdc = dev_get_drvdata(&pdev->dev);
clk_disable_unprepare(sdc->clk);
-
- return 0;
}
static const struct of_device_id aspeed_sdc_of_match[] = {
@@ -606,25 +595,6 @@ static struct platform_driver aspeed_sdc_driver = {
#if defined(CONFIG_MMC_SDHCI_OF_ASPEED_TEST)
#include "sdhci-of-aspeed-test.c"
-
-static inline int aspeed_sdc_tests_init(void)
-{
- return __kunit_test_suites_init(aspeed_sdc_test_suites);
-}
-
-static inline void aspeed_sdc_tests_exit(void)
-{
- __kunit_test_suites_exit(aspeed_sdc_test_suites);
-}
-#else
-static inline int aspeed_sdc_tests_init(void)
-{
- return 0;
-}
-
-static inline void aspeed_sdc_tests_exit(void)
-{
-}
#endif
static int __init aspeed_sdc_init(void)
@@ -637,18 +607,7 @@ static int __init aspeed_sdc_init(void)
rc = platform_driver_register(&aspeed_sdc_driver);
if (rc < 0)
- goto cleanup_sdhci;
-
- rc = aspeed_sdc_tests_init();
- if (rc < 0) {
- platform_driver_unregister(&aspeed_sdc_driver);
- goto cleanup_sdhci;
- }
-
- return 0;
-
-cleanup_sdhci:
- platform_driver_unregister(&aspeed_sdhci_driver);
+ platform_driver_unregister(&aspeed_sdhci_driver);
return rc;
}
@@ -656,8 +615,6 @@ module_init(aspeed_sdc_init);
static void __exit aspeed_sdc_exit(void)
{
- aspeed_sdc_tests_exit();
-
platform_driver_unregister(&aspeed_sdc_driver);
platform_driver_unregister(&aspeed_sdhci_driver);
}
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 5564d7b23e7c..7c4ac65f247d 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -11,12 +11,13 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -61,7 +62,6 @@ static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
{
u16 clk;
- unsigned long timeout;
host->mmc->actual_clock = 0;
@@ -86,16 +86,11 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/* Wait max 20 ms */
- timeout = 20;
- while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
- & SDHCI_CLOCK_INT_STABLE)) {
- if (timeout == 0) {
- pr_err("%s: Internal clock never stabilised.\n",
- mmc_hostname(host->mmc));
- return;
- }
- timeout--;
- mdelay(1);
+ if (read_poll_timeout(sdhci_readw, clk, (clk & SDHCI_CLOCK_INT_STABLE),
+ 1000, 20000, false, host, SDHCI_CLOCK_CONTROL)) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ return;
}
clk |= SDHCI_CLOCK_CARD_EN;
@@ -105,8 +100,13 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
- if (timing == MMC_TIMING_MMC_DDR52)
- sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
+ u8 mc1r;
+
+ if (timing == MMC_TIMING_MMC_DDR52) {
+ mc1r = sdhci_readb(host, SDMMC_MC1R);
+ mc1r |= SDMMC_MC1R_DDR;
+ sdhci_writeb(host, mc1r, SDMMC_MC1R);
+ }
sdhci_set_uhs_signaling(host, timing);
}
@@ -114,6 +114,7 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ unsigned int tmp;
sdhci_reset(host, mask);
@@ -126,6 +127,10 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
sdhci_writel(host, calcr | SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN,
SDMMC_CALCR);
+
+ if (read_poll_timeout(sdhci_readl, tmp, !(tmp & SDMMC_CALCR_EN),
+ 10, 20000, false, host, SDMMC_CALCR))
+ dev_err(mmc_dev(host->mmc), "Failed to calibrate\n");
}
}
@@ -224,7 +229,6 @@ static int sdhci_at91_set_clks_presets(struct device *dev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int sdhci_at91_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -238,17 +242,14 @@ static int sdhci_at91_suspend(struct device *dev)
return ret;
}
-#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM
static int sdhci_at91_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
- int ret;
- ret = sdhci_runtime_suspend_host(host);
+ sdhci_runtime_suspend_host(host);
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
@@ -257,7 +258,7 @@ static int sdhci_at91_runtime_suspend(struct device *dev)
clk_disable_unprepare(priv->hclock);
clk_disable_unprepare(priv->mainck);
- return ret;
+ return 0;
}
static int sdhci_at91_runtime_resume(struct device *dev)
@@ -295,30 +296,26 @@ static int sdhci_at91_runtime_resume(struct device *dev)
}
out:
- return sdhci_runtime_resume_host(host, 0);
+ sdhci_runtime_resume_host(host, 0);
+ return 0;
}
-#endif /* CONFIG_PM */
static const struct dev_pm_ops sdhci_at91_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(sdhci_at91_suspend, pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(sdhci_at91_runtime_suspend,
- sdhci_at91_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(sdhci_at91_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(sdhci_at91_runtime_suspend, sdhci_at91_runtime_resume, NULL)
};
static int sdhci_at91_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
const struct sdhci_at91_soc_data *soc_data;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_at91_priv *priv;
int ret;
- match = of_match_device(sdhci_at91_dt_match, &pdev->dev);
- if (!match)
+ soc_data = of_device_get_match_data(&pdev->dev);
+ if (!soc_data)
return -EINVAL;
- soc_data = match->data;
host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*priv));
if (IS_ERR(host))
@@ -330,32 +327,26 @@ static int sdhci_at91_probe(struct platform_device *pdev)
priv->mainck = devm_clk_get(&pdev->dev, "baseclk");
if (IS_ERR(priv->mainck)) {
- if (soc_data->baseclk_is_generated_internally) {
+ if (soc_data->baseclk_is_generated_internally)
priv->mainck = NULL;
- } else {
- dev_err(&pdev->dev, "failed to get baseclk\n");
- ret = PTR_ERR(priv->mainck);
- goto sdhci_pltfm_free;
- }
+ else
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->mainck),
+ "failed to get baseclk\n");
}
priv->hclock = devm_clk_get(&pdev->dev, "hclock");
- if (IS_ERR(priv->hclock)) {
- dev_err(&pdev->dev, "failed to get hclock\n");
- ret = PTR_ERR(priv->hclock);
- goto sdhci_pltfm_free;
- }
+ if (IS_ERR(priv->hclock))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->hclock),
+ "failed to get hclock\n");
priv->gck = devm_clk_get(&pdev->dev, "multclk");
- if (IS_ERR(priv->gck)) {
- dev_err(&pdev->dev, "failed to get multclk\n");
- ret = PTR_ERR(priv->gck);
- goto sdhci_pltfm_free;
- }
+ if (IS_ERR(priv->gck))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->gck),
+ "failed to get multclk\n");
ret = sdhci_at91_set_clks_presets(&pdev->dev);
if (ret)
- goto sdhci_pltfm_free;
+ return ret;
priv->restore_needed = false;
@@ -435,12 +426,10 @@ clocks_disable_unprepare:
clk_disable_unprepare(priv->gck);
clk_disable_unprepare(priv->mainck);
clk_disable_unprepare(priv->hclock);
-sdhci_pltfm_free:
- sdhci_pltfm_free(pdev);
return ret;
}
-static int sdhci_at91_remove(struct platform_device *pdev)
+static void sdhci_at91_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -453,13 +442,11 @@ static int sdhci_at91_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- sdhci_pltfm_unregister(pdev);
+ sdhci_pltfm_remove(pdev);
clk_disable_unprepare(gck);
clk_disable_unprepare(hclock);
clk_disable_unprepare(mainck);
-
- return 0;
}
static struct platform_driver sdhci_at91_driver = {
@@ -467,7 +454,7 @@ static struct platform_driver sdhci_at91_driver = {
.name = "sdhci-at91",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_at91_dt_match,
- .pm = &sdhci_at91_dev_pm_ops,
+ .pm = pm_ptr(&sdhci_at91_dev_pm_ops),
},
.probe = sdhci_at91_probe,
.remove = sdhci_at91_remove,
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index bac874ab0b33..51949cde0958 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -8,16 +8,27 @@
*/
#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
#include <linux/sizes.h>
+#include <linux/mfd/syscon.h>
+#include <linux/units.h>
#include "sdhci-pltfm.h"
+#include "cqhci.h"
+#include "sdhci-cqhci.h"
#define SDHCI_DWCMSHC_ARG2_STUFF GENMASK(31, 16)
@@ -30,15 +41,55 @@
/* Offset inside the vendor area 1 */
#define DWCMSHC_HOST_CTRL3 0x8
#define DWCMSHC_EMMC_CONTROL 0x2c
+#define DWCMSHC_CARD_IS_EMMC BIT(0)
#define DWCMSHC_ENHANCED_STROBE BIT(8)
#define DWCMSHC_EMMC_ATCTRL 0x40
+#define DWCMSHC_AT_STAT 0x44
+/* Tuning and auto-tuning fields in AT_CTRL_R control register */
+#define AT_CTRL_AT_EN BIT(0) /* autotuning is enabled */
+#define AT_CTRL_CI_SEL BIT(1) /* interval to drive center phase select */
+#define AT_CTRL_SWIN_TH_EN BIT(2) /* sampling window threshold enable */
+#define AT_CTRL_RPT_TUNE_ERR BIT(3) /* enable reporting framing errors */
+#define AT_CTRL_SW_TUNE_EN BIT(4) /* enable software managed tuning */
+#define AT_CTRL_WIN_EDGE_SEL_MASK GENMASK(11, 8) /* bits [11:8] */
+#define AT_CTRL_WIN_EDGE_SEL 0xf /* sampling window edge select */
+#define AT_CTRL_TUNE_CLK_STOP_EN BIT(16) /* clocks stopped during phase code change */
+#define AT_CTRL_PRE_CHANGE_DLY_MASK GENMASK(18, 17) /* bits [18:17] */
+#define AT_CTRL_PRE_CHANGE_DLY 0x1 /* 2-cycle latency */
+#define AT_CTRL_POST_CHANGE_DLY_MASK GENMASK(20, 19) /* bits [20:19] */
+#define AT_CTRL_POST_CHANGE_DLY 0x3 /* 4-cycle latency */
+#define AT_CTRL_SWIN_TH_VAL_MASK GENMASK(31, 24) /* bits [31:24] */
+#define AT_CTRL_SWIN_TH_VAL 0x9 /* sampling window threshold */
+
+/* DWC IP vendor area 2 pointer */
+#define DWCMSHC_P_VENDOR_AREA2 0xea
+
+/* Sophgo CV18XX specific Registers */
+#define CV18XX_SDHCI_MSHC_CTRL 0x00
+#define CV18XX_EMMC_FUNC_EN BIT(0)
+#define CV18XX_LATANCY_1T BIT(1)
+#define CV18XX_SDHCI_PHY_TX_RX_DLY 0x40
+#define CV18XX_PHY_TX_DLY_MSK GENMASK(6, 0)
+#define CV18XX_PHY_TX_SRC_MSK GENMASK(9, 8)
+#define CV18XX_PHY_TX_SRC_INVERT_CLK_TX 0x1
+#define CV18XX_PHY_RX_DLY_MSK GENMASK(22, 16)
+#define CV18XX_PHY_RX_SRC_MSK GENMASK(25, 24)
+#define CV18XX_PHY_RX_SRC_INVERT_RX_CLK 0x1
+#define CV18XX_SDHCI_PHY_CONFIG 0x4c
+#define CV18XX_PHY_TX_BPS BIT(0)
+
+#define CV18XX_TUNE_MAX 128
+#define CV18XX_TUNE_STEP 1
+#define CV18XX_RETRY_TUNING_MAX 50
/* Rockchip specific Registers */
#define DWCMSHC_EMMC_DLL_CTRL 0x800
#define DWCMSHC_EMMC_DLL_RXCLK 0x804
#define DWCMSHC_EMMC_DLL_TXCLK 0x808
#define DWCMSHC_EMMC_DLL_STRBIN 0x80c
-#define DLL_STRBIN_TAPNUM_FROM_SW BIT(24)
+#define DECMSHC_EMMC_DLL_CMDOUT 0x810
+#define DECMSHC_EMMC_MISC_CON 0x81C
+#define MISC_INTCLK_EN BIT(1)
#define DWCMSHC_EMMC_DLL_STATUS0 0x840
#define DWCMSHC_EMMC_DLL_START BIT(0)
#define DWCMSHC_EMMC_DLL_LOCKED BIT(8)
@@ -46,32 +97,238 @@
#define DWCMSHC_EMMC_DLL_RXCLK_SRCSEL 29
#define DWCMSHC_EMMC_DLL_START_POINT 16
#define DWCMSHC_EMMC_DLL_INC 8
+#define DWCMSHC_EMMC_DLL_BYPASS BIT(24)
#define DWCMSHC_EMMC_DLL_DLYENA BIT(27)
-#define DLL_TXCLK_TAPNUM_DEFAULT 0x8
-#define DLL_STRBIN_TAPNUM_DEFAULT 0x8
+#define DLL_TXCLK_TAPNUM_DEFAULT 0x10
+#define DLL_TXCLK_TAPNUM_90_DEGREES 0xA
#define DLL_TXCLK_TAPNUM_FROM_SW BIT(24)
+#define DLL_STRBIN_TAPNUM_DEFAULT 0x4
+#define DLL_STRBIN_TAPNUM_FROM_SW BIT(24)
+#define DLL_STRBIN_DELAY_NUM_SEL BIT(26)
+#define DLL_STRBIN_DELAY_NUM_OFFSET 16
+#define DLL_STRBIN_DELAY_NUM_DEFAULT 0x16
#define DLL_RXCLK_NO_INVERTER 1
#define DLL_RXCLK_INVERTER 0
+#define DLL_CMDOUT_TAPNUM_90_DEGREES 0x8
+#define DLL_RXCLK_ORI_GATE BIT(31)
+#define DLL_CMDOUT_TAPNUM_FROM_SW BIT(24)
+#define DLL_CMDOUT_SRC_CLK_NEG BIT(28)
+#define DLL_CMDOUT_EN_SRC_CLK_NEG BIT(29)
+
#define DLL_LOCK_WO_TMOUT(x) \
((((x) & DWCMSHC_EMMC_DLL_LOCKED) == DWCMSHC_EMMC_DLL_LOCKED) && \
(((x) & DWCMSHC_EMMC_DLL_TIMEOUT) == 0))
-#define RK3568_MAX_CLKS 3
+
+/* PHY register area pointer */
+#define DWC_MSHC_PTR_PHY_R 0x300
+
+/* PHY general configuration */
+#define PHY_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x00)
+#define PHY_CNFG_RSTN_DEASSERT 0x1 /* Deassert PHY reset */
+#define PHY_CNFG_PHY_PWRGOOD_MASK BIT_MASK(1) /* bit [1] */
+#define PHY_CNFG_PAD_SP_MASK GENMASK(19, 16) /* bits [19:16] */
+#define PHY_CNFG_PAD_SP 0x0c /* PMOS TX drive strength */
+#define PHY_CNFG_PAD_SP_SG2042 0x09 /* PMOS TX drive strength for SG2042 */
+#define PHY_CNFG_PAD_SN_MASK GENMASK(23, 20) /* bits [23:20] */
+#define PHY_CNFG_PAD_SN 0x0c /* NMOS TX drive strength */
+#define PHY_CNFG_PAD_SN_SG2042 0x08 /* NMOS TX drive strength for SG2042 */
+
+/* PHY command/response pad settings */
+#define PHY_CMDPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x04)
+
+/* PHY data pad settings */
+#define PHY_DATAPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x06)
+
+/* PHY clock pad settings */
+#define PHY_CLKPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x08)
+
+/* PHY strobe pad settings */
+#define PHY_STBPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0a)
+
+/* PHY reset pad settings */
+#define PHY_RSTNPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0c)
+
+/* Bitfields are common for all pad settings */
+#define PHY_PAD_RXSEL_1V8 0x1 /* Receiver type select for 1.8V */
+#define PHY_PAD_RXSEL_3V3 0x2 /* Receiver type select for 3.3V */
+
+#define PHY_PAD_WEAKPULL_MASK GENMASK(4, 3) /* bits [4:3] */
+#define PHY_PAD_WEAKPULL_PULLUP 0x1 /* Weak pull up enabled */
+#define PHY_PAD_WEAKPULL_PULLDOWN 0x2 /* Weak pull down enabled */
+
+#define PHY_PAD_TXSLEW_CTRL_P_MASK GENMASK(8, 5) /* bits [8:5] */
+#define PHY_PAD_TXSLEW_CTRL_P 0x3 /* Slew control for P-Type pad TX */
+#define PHY_PAD_TXSLEW_CTRL_N_MASK GENMASK(12, 9) /* bits [12:9] */
+#define PHY_PAD_TXSLEW_CTRL_N 0x3 /* Slew control for N-Type pad TX */
+#define PHY_PAD_TXSLEW_CTRL_N_SG2042 0x2 /* Slew control for N-Type pad TX for SG2042 */
+
+/* PHY CLK delay line settings */
+#define PHY_SDCLKDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x1d)
+#define PHY_SDCLKDL_CNFG_EXTDLY_EN BIT(0)
+#define PHY_SDCLKDL_CNFG_UPDATE BIT(4) /* set before writing to SDCLKDL_DC */
+
+/* PHY CLK delay line delay code */
+#define PHY_SDCLKDL_DC_R (DWC_MSHC_PTR_PHY_R + 0x1e)
+#define PHY_SDCLKDL_DC_INITIAL 0x40 /* initial delay code */
+#define PHY_SDCLKDL_DC_DEFAULT 0x32 /* default delay code */
+#define PHY_SDCLKDL_DC_HS400 0x18 /* delay code for HS400 mode */
+
+#define PHY_SMPLDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x20)
+#define PHY_SMPLDL_CNFG_BYPASS_EN BIT(1)
+
+/* PHY drift_cclk_rx delay line configuration setting */
+#define PHY_ATDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x21)
+#define PHY_ATDL_CNFG_INPSEL_MASK GENMASK(3, 2) /* bits [3:2] */
+#define PHY_ATDL_CNFG_INPSEL 0x3 /* delay line input source */
+#define PHY_ATDL_CNFG_INPSEL_SG2042 0x2 /* delay line input source for SG2042 */
+
+/* PHY DLL control settings */
+#define PHY_DLL_CTRL_R (DWC_MSHC_PTR_PHY_R + 0x24)
+#define PHY_DLL_CTRL_DISABLE 0x0 /* PHY DLL is disabled */
+#define PHY_DLL_CTRL_ENABLE 0x1 /* PHY DLL is enabled */
+
+/* PHY DLL configuration register 1 */
+#define PHY_DLL_CNFG1_R (DWC_MSHC_PTR_PHY_R + 0x25)
+#define PHY_DLL_CNFG1_SLVDLY_MASK GENMASK(5, 4) /* bits [5:4] */
+#define PHY_DLL_CNFG1_SLVDLY 0x2 /* DLL slave update delay input */
+#define PHY_DLL_CNFG1_WAITCYCLE 0x5 /* DLL wait cycle input */
+
+/* PHY DLL configuration register 2 */
+#define PHY_DLL_CNFG2_R (DWC_MSHC_PTR_PHY_R + 0x26)
+#define PHY_DLL_CNFG2_JUMPSTEP 0xa /* DLL jump step input */
+
+/* PHY DLL master and slave delay line configuration settings */
+#define PHY_DLLDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x28)
+#define PHY_DLLDL_CNFG_SLV_INPSEL_MASK GENMASK(6, 5) /* bits [6:5] */
+#define PHY_DLLDL_CNFG_SLV_INPSEL 0x3 /* clock source select for slave DL */
+
+/* PHY DLL offset setting register */
+#define PHY_DLL_OFFST_R (DWC_MSHC_PTR_PHY_R + 0x29)
+/* DLL LBT setting register */
+#define PHY_DLLBT_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x2c)
+/* DLL Status register */
+#define PHY_DLL_STATUS_R (DWC_MSHC_PTR_PHY_R + 0x2e)
+#define DLL_LOCK_STS BIT(0) /* DLL is locked and ready */
+/*
+ * Captures the value of DLL's lock error status information. Value is valid
+ * only when LOCK_STS is set.
+ */
+#define DLL_ERROR_STS BIT(1)
+
+#define FLAG_IO_FIXED_1V8 BIT(0)
#define BOUNDARY_OK(addr, len) \
((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1)))
-struct rk3568_priv {
- /* Rockchip specified optional clocks */
- struct clk_bulk_data rockchip_clks[RK3568_MAX_CLKS];
+#define DWCMSHC_SDHCI_CQE_TRNS_MODE (SDHCI_TRNS_MULTI | \
+ SDHCI_TRNS_BLK_CNT_EN | \
+ SDHCI_TRNS_DMA)
+
+/* SMC call for BlueField-3 eMMC RST_N */
+#define BLUEFIELD_SMC_SET_EMMC_RST_N 0x82000007
+
+/* Eswin specific Registers */
+#define EIC7700_CARD_CLK_STABLE BIT(28)
+#define EIC7700_INT_BCLK_STABLE BIT(16)
+#define EIC7700_INT_ACLK_STABLE BIT(8)
+#define EIC7700_INT_TMCLK_STABLE BIT(0)
+#define EIC7700_INT_CLK_STABLE (EIC7700_CARD_CLK_STABLE | \
+ EIC7700_INT_ACLK_STABLE | \
+ EIC7700_INT_BCLK_STABLE | \
+ EIC7700_INT_TMCLK_STABLE)
+#define EIC7700_HOST_VAL_STABLE BIT(0)
+
+/* strength definition */
+#define PHYCTRL_DR_33OHM 0xee
+#define PHYCTRL_DR_40OHM 0xcc
+#define PHYCTRL_DR_50OHM 0x88
+#define PHYCTRL_DR_66OHM 0x44
+#define PHYCTRL_DR_100OHM 0x00
+
+#define MAX_PHASE_CODE 0xff
+#define TUNING_RANGE_THRESHOLD 40
+#define PHY_CLK_MAX_DELAY_MASK 0x7f
+#define PHY_DELAY_CODE_MAX 0x7f
+#define PHY_DELAY_CODE_EMMC 0x17
+#define PHY_DELAY_CODE_SD 0x55
+
+enum dwcmshc_rk_type {
+ DWCMSHC_RK3568,
+ DWCMSHC_RK3588,
+};
+
+struct rk35xx_priv {
+ struct reset_control *reset;
+ enum dwcmshc_rk_type devtype;
u8 txclk_tapnum;
};
+struct eic7700_priv {
+ struct reset_control *reset;
+ unsigned int drive_impedance;
+};
+
+#define DWCMSHC_MAX_OTHER_CLKS 3
+
struct dwcmshc_priv {
struct clk *bus_clk;
- int vendor_specific_area1; /* P_VENDOR_SPECIFIC_AREA reg */
+ int vendor_specific_area1; /* P_VENDOR_SPECIFIC_AREA1 reg */
+ int vendor_specific_area2; /* P_VENDOR_SPECIFIC_AREA2 reg */
+
+ int num_other_clks;
+ struct clk_bulk_data other_clks[DWCMSHC_MAX_OTHER_CLKS];
+
void *priv; /* pointer to SoC private stuff */
+ u16 delay_line;
+ u16 flags;
+};
+
+struct dwcmshc_pltfm_data {
+ const struct sdhci_pltfm_data pdata;
+ const struct cqhci_host_ops *cqhci_host_ops;
+ int (*init)(struct device *dev, struct sdhci_host *host, struct dwcmshc_priv *dwc_priv);
+ void (*postinit)(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv);
};
+static void dwcmshc_enable_card_clk(struct sdhci_host *host)
+{
+ u16 ctrl;
+
+ ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if ((ctrl & SDHCI_CLOCK_INT_EN) && !(ctrl & SDHCI_CLOCK_CARD_EN)) {
+ ctrl |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
+ }
+}
+
+static int dwcmshc_get_enable_other_clks(struct device *dev,
+ struct dwcmshc_priv *priv,
+ int num_clks,
+ const char * const clk_ids[])
+{
+ int err;
+
+ if (num_clks > DWCMSHC_MAX_OTHER_CLKS)
+ return -EINVAL;
+
+ for (int i = 0; i < num_clks; i++)
+ priv->other_clks[i].id = clk_ids[i];
+
+ err = devm_clk_bulk_get_optional(dev, num_clks, priv->other_clks);
+ if (err) {
+ dev_err(dev, "failed to get clocks %d\n", err);
+ return err;
+ }
+
+ err = clk_bulk_prepare_enable(num_clks, priv->other_clks);
+ if (err)
+ dev_err(dev, "failed to enable clocks %d\n", err);
+
+ priv->num_other_clks = num_clks;
+
+ return err;
+}
+
/*
* If DMA addr spans 128MB boundary, we split the DMA transfer into two
* so that each DMA transfer doesn't exceed the boundary.
@@ -95,6 +352,19 @@ static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc,
sdhci_adma_write_desc(host, desc, addr, len, cmd);
}
+static void dwcmshc_reset(struct sdhci_host *host, u8 mask)
+{
+ sdhci_reset(host, mask);
+
+ /*
+ * The dwcmshc does not comply with the SDHCI specification here: a
+ * software reset of the CMD line should clear "Command Complete" in
+ * the Normal Interrupt Status register, but it does not. Clear the
+ * bit here to compensate for this quirk.
+ */
+ if (mask & SDHCI_RESET_CMD)
+ sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
+}
+
static unsigned int dwcmshc_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -105,6 +375,13 @@ static unsigned int dwcmshc_get_max_clock(struct sdhci_host *host)
return pltfm_host->clock;
}
+static unsigned int rk35xx_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_round_rate(pltfm_host->clk, ULONG_MAX);
+}
+
static void dwcmshc_check_auto_cmd23(struct mmc_host *mmc,
struct mmc_request *mrq)
{
@@ -128,10 +405,91 @@ static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_request(mmc, mrq);
}
+static void dwcmshc_phy_init(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u32 rxsel = PHY_PAD_RXSEL_3V3;
+ u32 val;
+
+ if (priv->flags & FLAG_IO_FIXED_1V8 ||
+ host->mmc->ios.timing & MMC_SIGNAL_VOLTAGE_180)
+ rxsel = PHY_PAD_RXSEL_1V8;
+
+ /* deassert phy reset & set tx drive strength */
+ val = PHY_CNFG_RSTN_DEASSERT;
+ val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP);
+ val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN);
+ sdhci_writel(host, val, PHY_CNFG_R);
+
+ /* disable delay line */
+ sdhci_writeb(host, PHY_SDCLKDL_CNFG_UPDATE, PHY_SDCLKDL_CNFG_R);
+
+ /* set delay line */
+ sdhci_writeb(host, priv->delay_line, PHY_SDCLKDL_DC_R);
+ sdhci_writeb(host, PHY_DLL_CNFG2_JUMPSTEP, PHY_DLL_CNFG2_R);
+
+ /* enable delay line */
+ val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R);
+ val &= ~(PHY_SDCLKDL_CNFG_UPDATE);
+ sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
+
+ /* configure phy pads */
+ val = rxsel;
+ val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
+ sdhci_writew(host, val, PHY_CMDPAD_CNFG_R);
+ sdhci_writew(host, val, PHY_DATAPAD_CNFG_R);
+ sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R);
+
+ val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
+ sdhci_writew(host, val, PHY_CLKPAD_CNFG_R);
+
+ val = rxsel;
+ val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
+ sdhci_writew(host, val, PHY_STBPAD_CNFG_R);
+
+ /* enable data strobe mode */
+ if (rxsel == PHY_PAD_RXSEL_1V8) {
+ u8 sel = FIELD_PREP(PHY_DLLDL_CNFG_SLV_INPSEL_MASK, PHY_DLLDL_CNFG_SLV_INPSEL);
+
+ sdhci_writeb(host, sel, PHY_DLLDL_CNFG_R);
+ }
+
+ /* enable phy dll */
+ sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R);
+}
+
+static void th1520_sdhci_set_phy(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
+ u16 emmc_ctrl;
+
+ dwcmshc_phy_init(host);
+
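+ /* A slot with both SD and SDIO disabled can only be eMMC, so flag the card as eMMC */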
+ if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
+ emmc_ctrl = sdhci_readw(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
+ emmc_ctrl |= DWCMSHC_CARD_IS_EMMC;
+ sdhci_writew(host, emmc_ctrl, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
+ }
+
+ sdhci_writeb(host, FIELD_PREP(PHY_DLL_CNFG1_SLVDLY_MASK, PHY_DLL_CNFG1_SLVDLY) |
+ PHY_DLL_CNFG1_WAITCYCLE, PHY_DLL_CNFG1_R);
+}
+
static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
- u16 ctrl_2;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u16 ctrl, ctrl_2;
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/* Select Bus Speed Mode for host */
@@ -149,11 +507,34 @@ static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
else if ((timing == MMC_TIMING_UHS_DDR50) ||
(timing == MMC_TIMING_MMC_DDR52))
ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
- else if (timing == MMC_TIMING_MMC_HS400)
+ else if (timing == MMC_TIMING_MMC_HS400) {
+ /* set CARD_IS_EMMC bit to enable Data Strobe for HS400 */
+ ctrl = sdhci_readw(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
+ ctrl |= DWCMSHC_CARD_IS_EMMC;
+ sdhci_writew(host, ctrl, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
+
ctrl_2 |= DWCMSHC_CTRL_HS400;
+ }
+
+ if (priv->flags & FLAG_IO_FIXED_1V8)
+ ctrl_2 |= SDHCI_CTRL_VDD_180;
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
+static void th1520_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+
+ dwcmshc_set_uhs_signaling(host, timing);
+ if (timing == MMC_TIMING_MMC_HS400)
+ priv->delay_line = PHY_SDCLKDL_DC_HS400;
+ else
+ sdhci_writeb(host, 0, PHY_DLLDL_CNFG_R);
+ th1520_sdhci_set_phy(host);
+}
+
static void dwcmshc_hs400_enhanced_strobe(struct mmc_host *mmc,
struct mmc_ios *ios)
{
@@ -172,28 +553,173 @@ static void dwcmshc_hs400_enhanced_strobe(struct mmc_host *mmc,
sdhci_writel(host, vendor, reg);
}
+static int dwcmshc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ int err = sdhci_execute_tuning(mmc, opcode);
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (err)
+ return err;
+
+ /*
+ * Tuning can leave the IP in an active state (Buffer Read Enable bit
+ * set) which prevents the entry to low power states (i.e. S0i3). Data
+ * reset will clear it.
+ */
+ sdhci_reset(host, SDHCI_RESET_DATA);
+
+ return 0;
+}
+
+static u32 dwcmshc_cqe_irq_handler(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static void dwcmshc_sdhci_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u8 ctrl;
+
+ sdhci_writew(host, DWCMSHC_SDHCI_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
+
+ sdhci_cqe_enable(mmc);
+
+ /*
+ * The "DesignWare Cores Mobile Storage Host Controller
+ * DWC_mshc / DWC_mshc_lite Databook" says:
+ * when "Host Version 4 Enable" is 1 in the Host Control 2 register,
+ * SDHCI_CTRL_ADMA32 bit means ADMA2 is selected.
+ * Selection of 32-bit/64-bit System Addressing:
+ * either 32-bit or 64-bit system addressing is selected by
+ * 64-bit Addressing bit in Host Control 2 register.
+ *
+ * On the other hand the "DesignWare Cores Mobile Storage Host
+ * Controller DWC_mshc / DWC_mshc_lite User Guide" says, that we have to
+ * set DMA_SEL to ADMA2 _only_ mode in the Host Control 2 register.
+ */
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ ctrl &= ~SDHCI_CTRL_DMA_MASK;
+ ctrl |= SDHCI_CTRL_ADMA32;
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
+static void dwcmshc_set_tran_desc(struct cqhci_host *cq_host, u8 **desc,
+ dma_addr_t addr, int len, bool end, bool dma64)
+{
+ int tmplen, offset;
+
+ if (likely(!len || BOUNDARY_OK(addr, len))) {
+ cqhci_set_tran_desc(*desc, addr, len, end, dma64);
+ return;
+ }
+
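+ /* Split at the 128 MB boundary: one descriptor up to the boundary, another for the remainder */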
+ offset = addr & (SZ_128M - 1);
+ tmplen = SZ_128M - offset;
+ cqhci_set_tran_desc(*desc, addr, tmplen, false, dma64);
+
+ addr += tmplen;
+ len -= tmplen;
+ *desc += cq_host->trans_desc_len;
+ cqhci_set_tran_desc(*desc, addr, len, end, dma64);
+}
+
+static void dwcmshc_cqhci_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static void rk35xx_sdhci_cqe_pre_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ /* Set Send Status Command Idle Timer to ~10.66 us (256 cycles / 24 MHz) */
+ reg = sdhci_readl(host, dwc_priv->vendor_specific_area2 + CQHCI_SSC1);
+ reg = (reg & ~CQHCI_SSC1_CIT_MASK) | 0x0100;
+ sdhci_writel(host, reg, dwc_priv->vendor_specific_area2 + CQHCI_SSC1);
+
+ reg = sdhci_readl(host, dwc_priv->vendor_specific_area2 + CQHCI_CFG);
+ reg |= CQHCI_ENABLE;
+ sdhci_writel(host, reg, dwc_priv->vendor_specific_area2 + CQHCI_CFG);
+}
+
+static void rk35xx_sdhci_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
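+ /* Drain any stale data left in the buffer before handing the controller over to CQE */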
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ }
+
+ sdhci_writew(host, DWCMSHC_SDHCI_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
+
+ sdhci_cqe_enable(mmc);
+}
+
+static void rk35xx_sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ u32 ctrl;
+
+ /*
+ * During CQE command transfers the command complete bit gets latched,
+ * so software must clear the command complete interrupt status when
+ * CQE is halted or disabled. Otherwise an unexpected SDHCI legacy
+ * interrupt is triggered when CQE is halted/disabled.
+ */
+ spin_lock_irqsave(&host->lock, flags);
+ ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
+ ctrl |= SDHCI_INT_RESPONSE;
+ sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
+ sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ sdhci_cqe_disable(mmc, recovery);
+}
+
+static void rk35xx_sdhci_cqe_post_disable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
+ u32 ctrl;
+
+ ctrl = sdhci_readl(host, dwc_priv->vendor_specific_area2 + CQHCI_CFG);
+ ctrl &= ~CQHCI_ENABLE;
+ sdhci_writel(host, ctrl, dwc_priv->vendor_specific_area2 + CQHCI_CFG);
+}
+
static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
- struct rk3568_priv *priv = dwc_priv->priv;
+ struct rk35xx_priv *priv = dwc_priv->priv;
u8 txclk_tapnum = DLL_TXCLK_TAPNUM_DEFAULT;
u32 extra, reg;
int err;
host->mmc->actual_clock = 0;
- /*
- * DO NOT TOUCH THIS SETTING. RX clk inverter unit is enabled
- * by default, but it shouldn't be enabled. We should anyway
- * disable it before issuing any cmds.
- */
- extra = DWCMSHC_EMMC_DLL_DLYENA |
- DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL;
- sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_RXCLK);
-
- if (clock == 0)
+ if (clock == 0) {
+ /* Disable interface clock at initial state. */
+ sdhci_set_clock(host, clock);
return;
+ }
/* Rockchip platform only support 375KHz for identify mode */
if (clock <= 400000)
@@ -205,15 +731,31 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
sdhci_set_clock(host, clock);
- /* Disable cmd conflict check */
+ /* Disable cmd conflict check and internal clock gate */
reg = dwc_priv->vendor_specific_area1 + DWCMSHC_HOST_CTRL3;
extra = sdhci_readl(host, reg);
extra &= ~BIT(0);
+ extra |= BIT(4);
sdhci_writel(host, extra, reg);
- if (clock <= 400000) {
- /* Disable DLL to reset sample clock */
- sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_CTRL);
+ if (clock <= 52000000) {
+ /*
+ * Disable the DLL and reset both the sample and drive clocks.
+ * The bypass bit and start bit need to be set if DLL is not locked.
+ */
+ sdhci_writel(host, DWCMSHC_EMMC_DLL_BYPASS | DWCMSHC_EMMC_DLL_START, DWCMSHC_EMMC_DLL_CTRL);
+ sdhci_writel(host, DLL_RXCLK_ORI_GATE, DWCMSHC_EMMC_DLL_RXCLK);
+ sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK);
+ sdhci_writel(host, 0, DECMSHC_EMMC_DLL_CMDOUT);
+ /*
+ * Before switching to HS400ES mode the core enables enhanced
+ * strobe first, so the PHY strobe-in parameters have to be
+ * configured here in advance.
+ */
+ extra = DWCMSHC_EMMC_DLL_DLYENA |
+ DLL_STRBIN_DELAY_NUM_SEL |
+ DLL_STRBIN_DELAY_NUM_DEFAULT << DLL_STRBIN_DELAY_NUM_OFFSET;
+ sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
return;
}
@@ -222,6 +764,15 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
udelay(1);
sdhci_writel(host, 0x0, DWCMSHC_EMMC_DLL_CTRL);
+ /*
+ * We shouldn't set DLL_RXCLK_NO_INVERTER for identify mode but
+ * we must set it in higher speed modes.
+ */
+ extra = DWCMSHC_EMMC_DLL_DLYENA;
+ if (priv->devtype == DWCMSHC_RK3568)
+ extra |= DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL;
+ sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_RXCLK);
+
/* Init DLL settings */
extra = 0x5 << DWCMSHC_EMMC_DLL_START_POINT |
0x2 << DWCMSHC_EMMC_DLL_INC |
@@ -236,7 +787,7 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
}
extra = 0x1 << 16 | /* tune clock stop en */
- 0x2 << 17 | /* pre-change delay */
+ 0x3 << 17 | /* pre-change delay */
0x3 << 19; /* post-change delay */
sdhci_writel(host, extra, dwc_priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
@@ -244,8 +795,20 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
host->mmc->ios.timing == MMC_TIMING_MMC_HS400)
txclk_tapnum = priv->txclk_tapnum;
+ if ((priv->devtype == DWCMSHC_RK3588) && host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
+ txclk_tapnum = DLL_TXCLK_TAPNUM_90_DEGREES;
+
+ extra = DLL_CMDOUT_SRC_CLK_NEG |
+ DLL_CMDOUT_EN_SRC_CLK_NEG |
+ DWCMSHC_EMMC_DLL_DLYENA |
+ DLL_CMDOUT_TAPNUM_90_DEGREES |
+ DLL_CMDOUT_TAPNUM_FROM_SW;
+ sdhci_writel(host, extra, DECMSHC_EMMC_DLL_CMDOUT);
+ }
+
extra = DWCMSHC_EMMC_DLL_DLYENA |
DLL_TXCLK_TAPNUM_FROM_SW |
+ DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL |
txclk_tapnum;
sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_TXCLK);
@@ -255,90 +818,1129 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
}
+static void rk35xx_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
+ struct rk35xx_priv *priv = dwc_priv->priv;
+ u32 extra = sdhci_readl(host, DECMSHC_EMMC_MISC_CON);
+
+ if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL))
+ cqhci_deactivate(host->mmc);
+
+ if (mask & SDHCI_RESET_ALL && priv->reset) {
+ reset_control_assert(priv->reset);
+ udelay(1);
+ reset_control_deassert(priv->reset);
+ }
+
+ sdhci_reset(host, mask);
+
+ /* Enable INTERNAL CLOCK */
+ sdhci_writel(host, MISC_INTCLK_EN | extra, DECMSHC_EMMC_MISC_CON);
+}
+
+static int dwcmshc_rk35xx_init(struct device *dev, struct sdhci_host *host,
+ struct dwcmshc_priv *dwc_priv)
+{
+ static const char * const clk_ids[] = {"axi", "block", "timer"};
+ struct rk35xx_priv *priv;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(struct rk35xx_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (of_device_is_compatible(dev->of_node, "rockchip,rk3588-dwcmshc"))
+ priv->devtype = DWCMSHC_RK3588;
+ else
+ priv->devtype = DWCMSHC_RK3568;
+
+ priv->reset = devm_reset_control_array_get_optional_exclusive(mmc_dev(host->mmc));
+ if (IS_ERR(priv->reset)) {
+ err = PTR_ERR(priv->reset);
+ dev_err(mmc_dev(host->mmc), "failed to get reset control %d\n", err);
+ return err;
+ }
+
+ err = dwcmshc_get_enable_other_clks(mmc_dev(host->mmc), dwc_priv,
+ ARRAY_SIZE(clk_ids), clk_ids);
+ if (err)
+ return err;
+
+ if (of_property_read_u8(mmc_dev(host->mmc)->of_node, "rockchip,txclk-tapnum",
+ &priv->txclk_tapnum))
+ priv->txclk_tapnum = DLL_TXCLK_TAPNUM_DEFAULT;
+
+ /* Disable cmd conflict check */
+ sdhci_writel(host, 0x0, dwc_priv->vendor_specific_area1 + DWCMSHC_HOST_CTRL3);
+ /* Reset previous settings */
+ sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK);
+ sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_STRBIN);
+
+ dwc_priv->priv = priv;
+
+ return 0;
+}
+
+static void dwcmshc_rk35xx_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
+{
+ /*
+ * Don't support high-speed bus modes at low clock speeds, as the
+ * DLL cannot be used under those conditions.
+ */
+ if (host->mmc->f_max <= 52000000) {
+ dev_info(mmc_dev(host->mmc), "Disabling HS200/HS400, frequency too low (%d)\n",
+ host->mmc->f_max);
+ host->mmc->caps2 &= ~(MMC_CAP2_HS200 | MMC_CAP2_HS400);
+ host->mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR);
+ }
+}
+
+static void dwcmshc_rk3576_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ int ret;
+
+ /*
+ * This works around the design of the RK3576's power domains: the
+ * PD_NVM power domain, which contains the sdhci controller on the
+ * RK3576, never comes back up the same way once it has been
+ * run-time suspended. This can happen during early kernel boot if no
+ * driver is using either PD_NVM or its child power domain PD_SDGMAC
+ * for a short moment, leading to it being turned off to save power.
+ * By keeping it on, sdhci suspending won't make PD_NVM a candidate
+ * for getting turned off.
+ */
+ ret = dev_pm_genpd_rpm_always_on(dev, true);
+ if (ret && ret != -EOPNOTSUPP)
+ dev_warn(dev, "failed to set PD rpm always on, SoC may hang later: %pe\n",
+ ERR_PTR(ret));
+
+ dwcmshc_rk35xx_postinit(host, dwc_priv);
+}
+
+static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u32 val = 0;
+
+ if (host->flags & SDHCI_HS400_TUNING)
+ return 0;
+
+ sdhci_writeb(host, FIELD_PREP(PHY_ATDL_CNFG_INPSEL_MASK, PHY_ATDL_CNFG_INPSEL),
+ PHY_ATDL_CNFG_R);
+ val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
+
+ /*
+ * configure tuning settings:
+ * - center phase select code driven in block gap interval
+ * - disable reporting of framing errors
+ * - disable software managed tuning
+ * - disable user selection of sampling window edges,
+ * instead tuning calculated edges are used
+ */
+ val &= ~(AT_CTRL_CI_SEL | AT_CTRL_RPT_TUNE_ERR | AT_CTRL_SW_TUNE_EN |
+ FIELD_PREP(AT_CTRL_WIN_EDGE_SEL_MASK, AT_CTRL_WIN_EDGE_SEL));
+
+ /*
+ * configure tuning settings:
+ * - enable auto-tuning
+ * - enable sampling window threshold
+ * - stop clocks during phase code change
+ * - set max latency in cycles between tx and rx clocks
+ * - set max latency in cycles to switch output phase
+ * - set max sampling window threshold value
+ */
+ val |= AT_CTRL_AT_EN | AT_CTRL_SWIN_TH_EN | AT_CTRL_TUNE_CLK_STOP_EN;
+ val |= FIELD_PREP(AT_CTRL_PRE_CHANGE_DLY_MASK, AT_CTRL_PRE_CHANGE_DLY);
+ val |= FIELD_PREP(AT_CTRL_POST_CHANGE_DLY_MASK, AT_CTRL_POST_CHANGE_DLY);
+ val |= FIELD_PREP(AT_CTRL_SWIN_TH_VAL_MASK, AT_CTRL_SWIN_TH_VAL);
+
+ sdhci_writel(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
+ val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
+
+ /* perform tuning */
+ sdhci_start_tuning(host);
+ host->tuning_loop_count = 128;
+ host->tuning_err = __sdhci_execute_tuning(host, opcode);
+ if (host->tuning_err) {
+ /* disable auto-tuning upon tuning error */
+ val &= ~AT_CTRL_AT_EN;
+ sdhci_writel(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
+ dev_err(mmc_dev(host->mmc), "tuning failed: %d\n", host->tuning_err);
+ return -EIO;
+ }
+ sdhci_end_tuning(host);
+
+ return 0;
+}
+
+static void th1520_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u16 ctrl_2;
+
+ dwcmshc_reset(host, mask);
+
+ if (priv->flags & FLAG_IO_FIXED_1V8) {
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (!(ctrl_2 & SDHCI_CTRL_VDD_180)) {
+ ctrl_2 |= SDHCI_CTRL_VDD_180;
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+ }
+ }
+}
+
+static int th1520_init(struct device *dev,
+ struct sdhci_host *host,
+ struct dwcmshc_priv *dwc_priv)
+{
+ dwc_priv->delay_line = PHY_SDCLKDL_DC_DEFAULT;
+
+ if (device_property_read_bool(dev, "mmc-ddr-1_8v") ||
+ device_property_read_bool(dev, "mmc-hs200-1_8v") ||
+ device_property_read_bool(dev, "mmc-hs400-1_8v"))
+ dwc_priv->flags |= FLAG_IO_FIXED_1V8;
+ else
+ dwc_priv->flags &= ~FLAG_IO_FIXED_1V8;
+
+ /*
+ * start_signal_voltage_switch() will try 3.3V first
+ * then 1.8V. Use SDHCI_SIGNALING_180 rather than
+ * SDHCI_SIGNALING_330 to avoid setting voltage to 3.3V
+ * in sdhci_start_signal_voltage_switch().
+ */
+ if (dwc_priv->flags & FLAG_IO_FIXED_1V8) {
+ host->flags &= ~SDHCI_SIGNALING_330;
+ host->flags |= SDHCI_SIGNALING_180;
+ }
+
+ sdhci_enable_v4_mode(host);
+
+ return 0;
+}
+
+static void cv18xx_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u32 val, emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
+
+ dwcmshc_reset(host, mask);
+
+ if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
+ val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
+ val |= CV18XX_EMMC_FUNC_EN;
+ sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
+ }
+
+ val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
+ val |= CV18XX_LATANCY_1T;
+ sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
+
+ val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_CONFIG);
+ val |= CV18XX_PHY_TX_BPS;
+ sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_CONFIG);
+
+ val = (FIELD_PREP(CV18XX_PHY_TX_DLY_MSK, 0) |
+ FIELD_PREP(CV18XX_PHY_TX_SRC_MSK, CV18XX_PHY_TX_SRC_INVERT_CLK_TX) |
+ FIELD_PREP(CV18XX_PHY_RX_DLY_MSK, 0) |
+ FIELD_PREP(CV18XX_PHY_RX_SRC_MSK, CV18XX_PHY_RX_SRC_INVERT_RX_CLK));
+ sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_TX_RX_DLY);
+}
+
+static void cv18xx_sdhci_set_tap(struct sdhci_host *host, int tap)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u16 clk;
+ u32 val;
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
+ val &= ~CV18XX_LATANCY_1T;
+ sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
+
+ val = (FIELD_PREP(CV18XX_PHY_TX_DLY_MSK, 0) |
+ FIELD_PREP(CV18XX_PHY_TX_SRC_MSK, CV18XX_PHY_TX_SRC_INVERT_CLK_TX) |
+ FIELD_PREP(CV18XX_PHY_RX_DLY_MSK, tap));
+ sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_TX_RX_DLY);
+
+ sdhci_writel(host, 0, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_CONFIG);
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+ usleep_range(1000, 2000);
+}
+
+static int cv18xx_retry_tuning(struct mmc_host *mmc, u32 opcode, int *cmd_error)
+{
+ int ret, retry = 0;
+
+ while (retry < CV18XX_RETRY_TUNING_MAX) {
+ ret = mmc_send_tuning(mmc, opcode, NULL);
+ if (ret)
+ return ret;
+ retry++;
+ }
+
+ return 0;
+}
+
+static void cv18xx_sdhci_post_tuning(struct sdhci_host *host)
+{
+ u32 val;
+
+ val = sdhci_readl(host, SDHCI_INT_STATUS);
+ val |= SDHCI_INT_DATA_AVAIL;
+ sdhci_writel(host, val, SDHCI_INT_STATUS);
+
+ dwcmshc_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+}
+
+static int cv18xx_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+ int min, max, avg, ret;
+ int win_length, target_min, target_max, target_win_length;
+
+ min = max = 0;
+ target_win_length = 0;
+
+ sdhci_reset_tuning(host);
+
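+ /* Scan the delay taps for passing windows, keep the widest one and settle on its midpoint */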
+ while (max < CV18XX_TUNE_MAX) {
+ /* first find the minimum delay that can pass tuning */
+ while (min < CV18XX_TUNE_MAX) {
+ cv18xx_sdhci_set_tap(host, min);
+ if (!cv18xx_retry_tuning(host->mmc, opcode, NULL))
+ break;
+ min += CV18XX_TUNE_STEP;
+ }
+
+ /* then find the maximum delay that can still pass tuning */
+ max = min + CV18XX_TUNE_STEP;
+ while (max < CV18XX_TUNE_MAX) {
+ cv18xx_sdhci_set_tap(host, max);
+ if (cv18xx_retry_tuning(host->mmc, opcode, NULL)) {
+ max -= CV18XX_TUNE_STEP;
+ break;
+ }
+ max += CV18XX_TUNE_STEP;
+ }
+
+ win_length = max - min + 1;
+ /* get the largest pass window */
+ if (win_length > target_win_length) {
+ target_win_length = win_length;
+ target_min = min;
+ target_max = max;
+ }
+
+ /* continue to find the next pass window */
+ min = max + CV18XX_TUNE_STEP;
+ }
+
+ cv18xx_sdhci_post_tuning(host);
+
+ /* use average delay to get the best timing */
+ avg = (target_min + target_max) / 2;
+ cv18xx_sdhci_set_tap(host, avg);
+ ret = mmc_send_tuning(host->mmc, opcode, NULL);
+
+ dev_dbg(mmc_dev(host->mmc), "tuning %s at 0x%x ret %d\n",
+ ret ? "failed" : "passed", avg, ret);
+
+ return ret;
+}
+
+static inline void sg2042_sdhci_phy_init(struct sdhci_host *host)
+{
+ u32 val;
+
+ /* Assert phy reset & set tx drive strength */
+ val = sdhci_readl(host, PHY_CNFG_R);
+ val &= ~PHY_CNFG_RSTN_DEASSERT;
+ val |= FIELD_PREP(PHY_CNFG_PHY_PWRGOOD_MASK, 1);
+ val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP_SG2042);
+ val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN_SG2042);
+ sdhci_writel(host, val, PHY_CNFG_R);
+
+ /* Configure phy pads */
+ val = PHY_PAD_RXSEL_3V3;
+ val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
+ sdhci_writew(host, val, PHY_CMDPAD_CNFG_R);
+ sdhci_writew(host, val, PHY_DATAPAD_CNFG_R);
+ sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R);
+
+ val = PHY_PAD_RXSEL_3V3;
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
+ sdhci_writew(host, val, PHY_CLKPAD_CNFG_R);
+
+ val = PHY_PAD_RXSEL_3V3;
+ val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
+ sdhci_writew(host, val, PHY_STBPAD_CNFG_R);
+
+ /* Configure delay line */
+ /* Enable fixed delay */
+ sdhci_writeb(host, PHY_SDCLKDL_CNFG_EXTDLY_EN, PHY_SDCLKDL_CNFG_R);
+ /*
+ * Set delay line.
+ * It is recommended that bit UPDATE_DC[4] be 1 while SDCLKDL_DC is being written.
+ * Ensure UPDATE_DC[4] is '0' when not updating code.
+ */
+ val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R);
+ val |= PHY_SDCLKDL_CNFG_UPDATE;
+ sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
+ /* Add 10 * 70ps = 0.7ns for output delay */
+ sdhci_writeb(host, 10, PHY_SDCLKDL_DC_R);
+ val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R);
+ val &= ~(PHY_SDCLKDL_CNFG_UPDATE);
+ sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
+
+ /* Set SMPLDL_CNFG, Bypass */
+ sdhci_writeb(host, PHY_SMPLDL_CNFG_BYPASS_EN, PHY_SMPLDL_CNFG_R);
+
+ /* Set ATDL_CNFG; the tuning clock is not used during init */
+ val = FIELD_PREP(PHY_ATDL_CNFG_INPSEL_MASK, PHY_ATDL_CNFG_INPSEL_SG2042);
+ sdhci_writeb(host, val, PHY_ATDL_CNFG_R);
+
+ /* Deassert phy reset */
+ val = sdhci_readl(host, PHY_CNFG_R);
+ val |= PHY_CNFG_RSTN_DEASSERT;
+ sdhci_writel(host, val, PHY_CNFG_R);
+}
+
+static void sg2042_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ sdhci_reset(host, mask);
+
+ if (mask & SDHCI_RESET_ALL)
+ sg2042_sdhci_phy_init(host);
+}
+
+static int sg2042_init(struct device *dev, struct sdhci_host *host,
+ struct dwcmshc_priv *dwc_priv)
+{
+ static const char * const clk_ids[] = {"timer"};
+
+ return dwcmshc_get_enable_other_clks(mmc_dev(host->mmc), dwc_priv,
+ ARRAY_SIZE(clk_ids), clk_ids);
+}
+
+static void sdhci_eic7700_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ u16 clk;
+
+ host->mmc->actual_clock = clock;
+
+ if (clock == 0) {
+ sdhci_set_clock(host, clock);
+ return;
+ }
+
+ clk_set_rate(pltfm_host->clk, clock);
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk |= SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ dwcmshc_enable_card_clk(host);
+}
+
+static void sdhci_eic7700_config_phy_delay(struct sdhci_host *host, int delay)
+{
+ delay &= PHY_CLK_MAX_DELAY_MASK;
+
+ /* phy clk delay line config */
+ sdhci_writeb(host, PHY_SDCLKDL_CNFG_UPDATE, PHY_SDCLKDL_CNFG_R);
+ sdhci_writeb(host, delay, PHY_SDCLKDL_DC_R);
+ sdhci_writeb(host, 0x0, PHY_SDCLKDL_CNFG_R);
+}
+
+static void sdhci_eic7700_config_phy(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
+ u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
+ struct eic7700_priv *priv = dwc_priv->priv;
+ unsigned int val, drv;
+
+ drv = FIELD_PREP(PHY_CNFG_PAD_SP_MASK, priv->drive_impedance & 0xF);
+ drv |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, (priv->drive_impedance >> 4) & 0xF);
+
+ if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
+ val = sdhci_readw(host, dwc_priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
+ val |= DWCMSHC_CARD_IS_EMMC;
+ sdhci_writew(host, val, dwc_priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
+ }
+
+ /* assert phy reset and set the pad drive strength */
+ sdhci_writel(host, drv | ~PHY_CNFG_RSTN_DEASSERT, PHY_CNFG_R);
+
+ /* configure phy pads */
+ val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
+ val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP);
+ val |= PHY_PAD_RXSEL_1V8;
+ sdhci_writew(host, val, PHY_CMDPAD_CNFG_R);
+ sdhci_writew(host, val, PHY_DATAPAD_CNFG_R);
+ sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R);
+
+ /* Clock PAD Setting */
+ val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
+ sdhci_writew(host, val, PHY_CLKPAD_CNFG_R);
+
+ /* PHY strobe PAD setting (EMMC only) */
+ if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
+ val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
+ val |= PHY_PAD_RXSEL_1V8;
+ sdhci_writew(host, val, PHY_STBPAD_CNFG_R);
+ }
+ usleep_range(2000, 3000);
+ sdhci_writel(host, drv | PHY_CNFG_RSTN_DEASSERT, PHY_CNFG_R);
+ sdhci_eic7700_config_phy_delay(host, dwc_priv->delay_line);
+}
+
+static void sdhci_eic7700_reset(struct sdhci_host *host, u8 mask)
+{
+ sdhci_reset(host, mask);
+
+ /* a full reset clears the phy configuration, so restore it */
+ if (mask == SDHCI_RESET_ALL)
+ sdhci_eic7700_config_phy(host);
+}
+
+static int sdhci_eic7700_reset_init(struct device *dev, struct eic7700_priv *priv)
+{
+ int ret;
+
+ priv->reset = devm_reset_control_array_get_optional_exclusive(dev);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ dev_err(dev, "failed to get reset control %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_assert(priv->reset);
+ if (ret) {
+ dev_err(dev, "Failed to assert reset signals: %d\n", ret);
+ return ret;
+ }
+ usleep_range(2000, 2100);
+ ret = reset_control_deassert(priv->reset);
+ if (ret) {
+ dev_err(dev, "Failed to deassert reset signals: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static unsigned int eic7700_convert_drive_impedance_ohm(struct device *dev, unsigned int dr_ohm)
+{
+ switch (dr_ohm) {
+ case 100:
+ return PHYCTRL_DR_100OHM;
+ case 66:
+ return PHYCTRL_DR_66OHM;
+ case 50:
+ return PHYCTRL_DR_50OHM;
+ case 40:
+ return PHYCTRL_DR_40OHM;
+ case 33:
+ return PHYCTRL_DR_33OHM;
+ }
+
+ dev_warn(dev, "Invalid value %u for drive-impedance-ohms.\n", dr_ohm);
+ return PHYCTRL_DR_50OHM;
+}
+
+static int sdhci_eic7700_delay_tuning(struct sdhci_host *host, u32 opcode)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
+ int delay_min = -1;
+ int delay_max = -1;
+ int cmd_error = 0;
+ int delay = 0;
+ int i = 0;
+ int ret;
+
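+ /* Sweep the delay codes, record the passing window and settle on its midpoint */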
+ for (i = 0; i <= PHY_DELAY_CODE_MAX; i++) {
+ sdhci_eic7700_config_phy_delay(host, i);
+ ret = mmc_send_tuning(host->mmc, opcode, &cmd_error);
+ if (ret) {
+ host->ops->reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+ usleep_range(200, 210);
+ if (delay_min != -1 && delay_max != -1)
+ break;
+ } else {
+ if (delay_min == -1) {
+ delay_min = i;
+ continue;
+ } else {
+ delay_max = i;
+ continue;
+ }
+ }
+ }
+ if (delay_min == -1 && delay_max == -1) {
+ pr_err("%s: delay code tuning failed!\n", mmc_hostname(host->mmc));
+ sdhci_eic7700_config_phy_delay(host, dwc_priv->delay_line);
+ return ret;
+ }
+
+ delay = (delay_min + delay_max) / 2;
+ sdhci_eic7700_config_phy_delay(host, delay);
+
+ return 0;
+}
+
+static int sdhci_eic7700_phase_code_tuning(struct sdhci_host *host, u32 opcode)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u32 sd_caps = MMC_CAP2_NO_MMC | MMC_CAP2_NO_SDIO;
+ int phase_code = -1;
+ int code_range = -1;
+ bool is_sd = false;
+ int code_min = -1;
+ int code_max = -1;
+ int cmd_error = 0;
+ int ret = 0;
+ int i = 0;
+
+ if ((host->mmc->caps2 & sd_caps) == sd_caps)
+ is_sd = true;
+
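+ /* Sweep the phase codes: SD keeps the widest passing window, eMMC stops at the first one */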
+ for (i = 0; i <= MAX_PHASE_CODE; i++) {
+ /* Centered Phase code */
+ sdhci_writew(host, i, priv->vendor_specific_area1 + DWCMSHC_AT_STAT);
+ ret = mmc_send_tuning(host->mmc, opcode, &cmd_error);
+ host->ops->reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+ if (ret) {
+ /* SD specific range tracking */
+ if (is_sd && code_min != -1 && code_max != -1) {
+ if (code_max - code_min > code_range) {
+ code_range = code_max - code_min;
+ phase_code = (code_min + code_max) / 2;
+ if (code_range > TUNING_RANGE_THRESHOLD)
+ break;
+ }
+ code_min = -1;
+ code_max = -1;
+ }
+ /* EMMC breaks after first valid range */
+ if (!is_sd && code_min != -1 && code_max != -1)
+ break;
+ } else {
+ /* Track valid phase code range */
+ if (code_min == -1) {
+ code_min = i;
+ if (!is_sd)
+ continue;
+ }
+ code_max = i;
+ if (is_sd && i == MAX_PHASE_CODE) {
+ if (code_max - code_min > code_range) {
+ code_range = code_max - code_min;
+ phase_code = (code_min + code_max) / 2;
+ }
+ }
+ }
+ }
+
+ /* Handle tuning failure case */
+ if ((is_sd && phase_code == -1) ||
+ (!is_sd && code_min == -1 && code_max == -1)) {
+ pr_err("%s: phase code tuning failed!\n", mmc_hostname(host->mmc));
+ sdhci_writew(host, 0, priv->vendor_specific_area1 + DWCMSHC_AT_STAT);
+ return -EIO;
+ }
+ if (!is_sd)
+ phase_code = (code_min + code_max) / 2;
+
+ sdhci_writew(host, phase_code, priv->vendor_specific_area1 + DWCMSHC_AT_STAT);
+
+ /* SD specific final verification */
+ if (is_sd) {
+ ret = mmc_send_tuning(host->mmc, opcode, &cmd_error);
+ host->ops->reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+ if (ret) {
+ pr_err("%s: Final phase code 0x%x verification failed!\n",
+ mmc_hostname(host->mmc), phase_code);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int sdhci_eic7700_executing_tuning(struct sdhci_host *host, u32 opcode)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
+ int ret = 0;
+ u16 ctrl;
+ u32 val;
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl &= ~SDHCI_CTRL_TUNED_CLK;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+ val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
+ val |= AT_CTRL_SW_TUNE_EN;
+ sdhci_writew(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
+
+ sdhci_writew(host, 0, priv->vendor_specific_area1 + DWCMSHC_AT_STAT);
+ sdhci_writew(host, 0x0, SDHCI_CMD_DATA);
+
+ if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
+ ret = sdhci_eic7700_delay_tuning(host, opcode);
+ if (ret)
+ return ret;
+ }
+
+ ret = sdhci_eic7700_phase_code_tuning(host, opcode);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void sdhci_eic7700_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u8 status;
+ u32 val;
+ int ret;
+
+ dwcmshc_set_uhs_signaling(host, timing);
+
+ /* the DLL must be locked for HS400 operation at 200 MHz */
+ if (timing == MMC_TIMING_MMC_HS400 && host->clock == 200000000) {
+ val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
+ val &= ~(FIELD_PREP(AT_CTRL_POST_CHANGE_DLY_MASK, AT_CTRL_POST_CHANGE_DLY));
+ /* 2-cycle latency */
+ val |= FIELD_PREP(AT_CTRL_POST_CHANGE_DLY_MASK, 0x2);
+ sdhci_writew(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
+
+ sdhci_writeb(host, FIELD_PREP(PHY_DLL_CNFG1_SLVDLY_MASK, PHY_DLL_CNFG1_SLVDLY) |
+ 0x3, PHY_DLL_CNFG1_R); /* DLL wait cycle input */
+ /* DLL jump step input */
+ sdhci_writeb(host, 0x02, PHY_DLL_CNFG2_R);
+ sdhci_writeb(host, FIELD_PREP(PHY_DLLDL_CNFG_SLV_INPSEL_MASK,
+ PHY_DLLDL_CNFG_SLV_INPSEL), PHY_DLLDL_CNFG_R);
+ /* Sets the value of DLL's offset input */
+ sdhci_writeb(host, 0x00, PHY_DLL_OFFST_R);
+ /*
+ * Sets the value of the DLL's olbt loadval input. Controls the lbt
+ * timer's timeout value at which DLL runs a revalidation cycle.
+ */
+ sdhci_writew(host, 0xffff, PHY_DLLBT_CNFG_R);
+ sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R);
+ usleep_range(100, 110);
+
+ ret = read_poll_timeout(sdhci_readb, status, status & DLL_LOCK_STS, 100, 1000000,
+ false, host, PHY_DLL_STATUS_R);
+ if (ret) {
+ pr_err("%s: DLL lock timeout! status: 0x%x\n",
+ mmc_hostname(host->mmc), status);
+ return;
+ }
+
+ status = sdhci_readb(host, PHY_DLL_STATUS_R);
+ if (status & DLL_ERROR_STS) {
+ pr_err("%s: DLL lock failed! err_status: 0x%x\n",
+ mmc_hostname(host->mmc), status);
+ }
+ }
+}
+
+static void sdhci_eic7700_set_uhs_wrapper(struct sdhci_host *host, unsigned int timing)
+{
+ u32 sd_caps = MMC_CAP2_NO_MMC | MMC_CAP2_NO_SDIO;
+
+ if ((host->mmc->caps2 & sd_caps) == sd_caps)
+ sdhci_set_uhs_signaling(host, timing);
+ else
+ sdhci_eic7700_set_uhs_signaling(host, timing);
+}
+
+static int eic7700_init(struct device *dev, struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
+{
+ u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
+ unsigned int val, hsp_int_status, hsp_pwr_ctrl;
+ struct of_phandle_args args;
+ struct eic7700_priv *priv;
+ struct regmap *hsp_regmap;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct eic7700_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dwc_priv->priv = priv;
+
+ ret = sdhci_eic7700_reset_init(dev, dwc_priv->priv);
+ if (ret) {
+ dev_err(dev, "failed to reset\n");
+ return ret;
+ }
+
+ ret = of_parse_phandle_with_fixed_args(dev->of_node, "eswin,hsp-sp-csr", 2, 0, &args);
+ if (ret) {
+ dev_err(dev, "Failed to parse 'eswin,hsp-sp-csr' phandle (%d)\n", ret);
+ return ret;
+ }
+
+ hsp_regmap = syscon_node_to_regmap(args.np);
+ if (IS_ERR(hsp_regmap)) {
+ dev_err(dev, "Failed to get regmap for 'eswin,hsp-sp-csr'\n");
+ of_node_put(args.np);
+ return PTR_ERR(hsp_regmap);
+ }
+ hsp_int_status = args.args[0];
+ hsp_pwr_ctrl = args.args[1];
+ of_node_put(args.np);
+ /*
+ * Assert clock stability: write EIC7700_INT_CLK_STABLE to hsp_int_status.
+ * This signals to the eMMC controller that platform clocks (card, ACLK,
+ * BCLK, TMCLK) are enabled and stable.
+ */
+ regmap_write(hsp_regmap, hsp_int_status, EIC7700_INT_CLK_STABLE);
+ /*
+ * Assert voltage stability: write EIC7700_HOST_VAL_STABLE to hsp_pwr_ctrl.
+ * This signals that VDD is stable and permits transition to high-speed
+ * modes (e.g., UHS-I).
+ */
+ regmap_write(hsp_regmap, hsp_pwr_ctrl, EIC7700_HOST_VAL_STABLE);
+
+ if ((host->mmc->caps2 & emmc_caps) == emmc_caps)
+ dwc_priv->delay_line = PHY_DELAY_CODE_EMMC;
+ else
+ dwc_priv->delay_line = PHY_DELAY_CODE_SD;
+
+ if (!of_property_read_u32(dev->of_node, "eswin,drive-impedance-ohms", &val))
+ priv->drive_impedance = eic7700_convert_drive_impedance_ohm(dev, val);
+ return 0;
+}
+
static const struct sdhci_ops sdhci_dwcmshc_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = dwcmshc_set_uhs_signaling,
.get_max_clock = dwcmshc_get_max_clock,
+ .reset = dwcmshc_reset,
+ .adma_write_desc = dwcmshc_adma_write_desc,
+ .irq = dwcmshc_cqe_irq_handler,
+};
+
+#ifdef CONFIG_ACPI
+static void dwcmshc_bf3_hw_reset(struct sdhci_host *host)
+{
+ struct arm_smccc_res res = { 0 };
+
+ arm_smccc_smc(BLUEFIELD_SMC_SET_EMMC_RST_N, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0)
+ pr_err("%s: RST_N failed.\n", mmc_hostname(host->mmc));
+}
+
+static const struct sdhci_ops sdhci_dwcmshc_bf3_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = dwcmshc_set_uhs_signaling,
+ .get_max_clock = dwcmshc_get_max_clock,
.reset = sdhci_reset,
.adma_write_desc = dwcmshc_adma_write_desc,
+ .irq = dwcmshc_cqe_irq_handler,
+ .hw_reset = dwcmshc_bf3_hw_reset,
};
+#endif
-static const struct sdhci_ops sdhci_dwcmshc_rk3568_ops = {
+static const struct sdhci_ops sdhci_dwcmshc_rk35xx_ops = {
.set_clock = dwcmshc_rk3568_set_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = dwcmshc_set_uhs_signaling,
- .get_max_clock = sdhci_pltfm_clk_get_max_clock,
- .reset = sdhci_reset,
+ .get_max_clock = rk35xx_get_max_clock,
+ .reset = rk35xx_sdhci_reset,
+ .adma_write_desc = dwcmshc_adma_write_desc,
+ .irq = dwcmshc_cqe_irq_handler,
+};
+
+static const struct sdhci_ops sdhci_dwcmshc_th1520_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = th1520_set_uhs_signaling,
+ .get_max_clock = dwcmshc_get_max_clock,
+ .reset = th1520_sdhci_reset,
.adma_write_desc = dwcmshc_adma_write_desc,
+ .voltage_switch = dwcmshc_phy_init,
+ .platform_execute_tuning = th1520_execute_tuning,
};
-static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
- .ops = &sdhci_dwcmshc_ops,
- .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+static const struct sdhci_ops sdhci_dwcmshc_cv18xx_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = dwcmshc_set_uhs_signaling,
+ .get_max_clock = dwcmshc_get_max_clock,
+ .reset = cv18xx_sdhci_reset,
+ .adma_write_desc = dwcmshc_adma_write_desc,
+ .platform_execute_tuning = cv18xx_sdhci_execute_tuning,
};
-static const struct sdhci_pltfm_data sdhci_dwcmshc_rk3568_pdata = {
- .ops = &sdhci_dwcmshc_rk3568_ops,
- .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
- SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
- SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
+static const struct sdhci_ops sdhci_dwcmshc_sg2042_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = dwcmshc_set_uhs_signaling,
+ .get_max_clock = dwcmshc_get_max_clock,
+ .reset = sg2042_sdhci_reset,
+ .adma_write_desc = dwcmshc_adma_write_desc,
+ .platform_execute_tuning = th1520_execute_tuning,
+};
+
+static const struct sdhci_ops sdhci_dwcmshc_eic7700_ops = {
+ .set_clock = sdhci_eic7700_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_eic7700_reset,
+ .set_uhs_signaling = sdhci_eic7700_set_uhs_wrapper,
+ .set_power = sdhci_set_power_and_bus_voltage,
+ .irq = dwcmshc_cqe_irq_handler,
+ .platform_execute_tuning = sdhci_eic7700_executing_tuning,
+};
+
+static const struct dwcmshc_pltfm_data sdhci_dwcmshc_pdata = {
+ .pdata = {
+ .ops = &sdhci_dwcmshc_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ },
+};
+
+#ifdef CONFIG_ACPI
+static const struct dwcmshc_pltfm_data sdhci_dwcmshc_bf3_pdata = {
+ .pdata = {
+ .ops = &sdhci_dwcmshc_bf3_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_ACMD23_BROKEN,
+ },
+};
+#endif
+
+static const struct cqhci_host_ops rk35xx_cqhci_ops = {
+ .pre_enable = rk35xx_sdhci_cqe_pre_enable,
+ .enable = rk35xx_sdhci_cqe_enable,
+ .disable = rk35xx_sdhci_cqe_disable,
+ .post_disable = rk35xx_sdhci_cqe_post_disable,
+ .dumpregs = dwcmshc_cqhci_dumpregs,
+ .set_tran_desc = dwcmshc_set_tran_desc,
+};
+
+static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk35xx_pdata = {
+ .pdata = {
+ .ops = &sdhci_dwcmshc_rk35xx_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
+ },
+ .cqhci_host_ops = &rk35xx_cqhci_ops,
+ .init = dwcmshc_rk35xx_init,
+ .postinit = dwcmshc_rk35xx_postinit,
+};
+
+static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk3576_pdata = {
+ .pdata = {
+ .ops = &sdhci_dwcmshc_rk35xx_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
+ },
+ .cqhci_host_ops = &rk35xx_cqhci_ops,
+ .init = dwcmshc_rk35xx_init,
+ .postinit = dwcmshc_rk3576_postinit,
};
-static int dwcmshc_rk3568_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
+static const struct dwcmshc_pltfm_data sdhci_dwcmshc_th1520_pdata = {
+ .pdata = {
+ .ops = &sdhci_dwcmshc_th1520_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ },
+ .init = th1520_init,
+};
+
+static const struct dwcmshc_pltfm_data sdhci_dwcmshc_cv18xx_pdata = {
+ .pdata = {
+ .ops = &sdhci_dwcmshc_cv18xx_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ },
+};
+
+static const struct dwcmshc_pltfm_data sdhci_dwcmshc_sg2042_pdata = {
+ .pdata = {
+ .ops = &sdhci_dwcmshc_sg2042_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ },
+ .init = sg2042_init,
+};
+
+static const struct dwcmshc_pltfm_data sdhci_dwcmshc_eic7700_pdata = {
+ .pdata = {
+ .ops = &sdhci_dwcmshc_eic7700_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
+ },
+ .init = eic7700_init,
+};
+
+static const struct cqhci_host_ops dwcmshc_cqhci_ops = {
+ .enable = dwcmshc_sdhci_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = dwcmshc_cqhci_dumpregs,
+ .set_tran_desc = dwcmshc_set_tran_desc,
+};
+
+static void dwcmshc_cqhci_init(struct sdhci_host *host, struct platform_device *pdev,
+ const struct dwcmshc_pltfm_data *pltfm_data)
{
+ struct cqhci_host *cq_host;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ bool dma64 = false;
+ u16 clk;
int err;
- struct rk3568_priv *priv = dwc_priv->priv;
- priv->rockchip_clks[0].id = "axi";
- priv->rockchip_clks[1].id = "block";
- priv->rockchip_clks[2].id = "timer";
- err = devm_clk_bulk_get_optional(mmc_dev(host->mmc), RK3568_MAX_CLKS,
- priv->rockchip_clks);
- if (err) {
- dev_err(mmc_dev(host->mmc), "failed to get clocks %d\n", err);
- return err;
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+ cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ dev_err(mmc_dev(host->mmc), "Unable to setup CQE: not enough memory\n");
+ goto dsbl_cqe_caps;
+ }
+
+ /*
+ * For the dwcmshc host controller we have to enable the internal
+ * clock before accessing some registers in Vendor Specific Area 2.
+ */
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk |= SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (!(clk & SDHCI_CLOCK_INT_EN)) {
+ dev_err(mmc_dev(host->mmc), "Unable to setup CQE: internal clock enable error\n");
+ goto free_cq_host;
}
- err = clk_bulk_prepare_enable(RK3568_MAX_CLKS, priv->rockchip_clks);
+ cq_host->mmio = host->ioaddr + priv->vendor_specific_area2;
+ if (pltfm_data->cqhci_host_ops)
+ cq_host->ops = pltfm_data->cqhci_host_ops;
+ else
+ cq_host->ops = &dwcmshc_cqhci_ops;
+
+ /* Enable use of 128-bit task descriptors */
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64) {
+ dev_dbg(mmc_dev(host->mmc), "128-bit task descriptors\n");
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+ }
+ err = cqhci_init(cq_host, host->mmc, dma64);
if (err) {
- dev_err(mmc_dev(host->mmc), "failed to enable clocks %d\n", err);
- return err;
+ dev_err(mmc_dev(host->mmc), "Unable to setup CQE: error %d\n", err);
+ goto int_clock_disable;
}
- if (of_property_read_u8(mmc_dev(host->mmc)->of_node, "rockchip,txclk-tapnum",
- &priv->txclk_tapnum))
- priv->txclk_tapnum = DLL_TXCLK_TAPNUM_DEFAULT;
+ dev_dbg(mmc_dev(host->mmc), "CQE init done\n");
- /* Disable cmd conflict check */
- sdhci_writel(host, 0x0, dwc_priv->vendor_specific_area1 + DWCMSHC_HOST_CTRL3);
- /* Reset previous settings */
- sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK);
- sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_STRBIN);
+ return;
- return 0;
+int_clock_disable:
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= ~SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+free_cq_host:
+ devm_kfree(&pdev->dev, cq_host);
+
+dsbl_cqe_caps:
+ host->mmc->caps2 &= ~(MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD);
}
static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
{
+ .compatible = "rockchip,rk3588-dwcmshc",
+ .data = &sdhci_dwcmshc_rk35xx_pdata,
+ },
+ {
+ .compatible = "rockchip,rk3576-dwcmshc",
+ .data = &sdhci_dwcmshc_rk3576_pdata,
+ },
+ {
.compatible = "rockchip,rk3568-dwcmshc",
- .data = &sdhci_dwcmshc_rk3568_pdata,
+ .data = &sdhci_dwcmshc_rk35xx_pdata,
},
{
.compatible = "snps,dwcmshc-sdhci",
.data = &sdhci_dwcmshc_pdata,
},
+ {
+ .compatible = "sophgo,cv1800b-dwcmshc",
+ .data = &sdhci_dwcmshc_cv18xx_pdata,
+ },
+ {
+ .compatible = "sophgo,sg2002-dwcmshc",
+ .data = &sdhci_dwcmshc_cv18xx_pdata,
+ },
+ {
+ .compatible = "thead,th1520-dwcmshc",
+ .data = &sdhci_dwcmshc_th1520_pdata,
+ },
+ {
+ .compatible = "sophgo,sg2042-dwcmshc",
+ .data = &sdhci_dwcmshc_sg2042_pdata,
+ },
+ {
+ .compatible = "eswin,eic7700-dwcmshc",
+ .data = &sdhci_dwcmshc_eic7700_pdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids);
#ifdef CONFIG_ACPI
static const struct acpi_device_id sdhci_dwcmshc_acpi_ids[] = {
- { .id = "MLNXBF30" },
+ {
+ .id = "MLNXBF30",
+ .driver_data = (kernel_ulong_t)&sdhci_dwcmshc_bf3_pdata,
+ },
{}
};
+MODULE_DEVICE_TABLE(acpi, sdhci_dwcmshc_acpi_ids);
#endif
static int dwcmshc_probe(struct platform_device *pdev)
@@ -347,18 +1949,17 @@ static int dwcmshc_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
struct dwcmshc_priv *priv;
- struct rk3568_priv *rk_priv = NULL;
- const struct sdhci_pltfm_data *pltfm_data;
+ const struct dwcmshc_pltfm_data *pltfm_data;
int err;
- u32 extra;
+ u32 extra, caps;
- pltfm_data = of_device_get_match_data(&pdev->dev);
+ pltfm_data = device_get_match_data(&pdev->dev);
if (!pltfm_data) {
dev_err(&pdev->dev, "Error: No device match data found\n");
return -ENODEV;
}
- host = sdhci_pltfm_init(pdev, pltfm_data,
+ host = sdhci_pltfm_init(pdev, &pltfm_data->pdata,
sizeof(struct dwcmshc_priv));
if (IS_ERR(host))
return PTR_ERR(host);
@@ -376,14 +1977,13 @@ static int dwcmshc_probe(struct platform_device *pdev)
if (dev->of_node) {
pltfm_host->clk = devm_clk_get(dev, "core");
- if (IS_ERR(pltfm_host->clk)) {
- err = PTR_ERR(pltfm_host->clk);
- dev_err(dev, "failed to get core clk: %d\n", err);
- goto free_pltfm;
- }
+ if (IS_ERR(pltfm_host->clk))
+ return dev_err_probe(dev, PTR_ERR(pltfm_host->clk),
+ "failed to get core clk\n");
+
err = clk_prepare_enable(pltfm_host->clk);
if (err)
- goto free_pltfm;
+ return err;
priv->bus_clk = devm_clk_get(dev, "bus");
if (!IS_ERR(priv->bus_clk))
@@ -401,68 +2001,109 @@ static int dwcmshc_probe(struct platform_device *pdev)
host->mmc_host_ops.request = dwcmshc_request;
host->mmc_host_ops.hs400_enhanced_strobe = dwcmshc_hs400_enhanced_strobe;
+ host->mmc_host_ops.execute_tuning = dwcmshc_execute_tuning;
- if (pltfm_data == &sdhci_dwcmshc_rk3568_pdata) {
- rk_priv = devm_kzalloc(&pdev->dev, sizeof(struct rk3568_priv), GFP_KERNEL);
- if (!rk_priv) {
- err = -ENOMEM;
- goto err_clk;
- }
-
- priv->priv = rk_priv;
-
- err = dwcmshc_rk3568_init(host, priv);
+ if (pltfm_data->init) {
+ err = pltfm_data->init(&pdev->dev, host, priv);
if (err)
goto err_clk;
}
+#ifdef CONFIG_ACPI
+ if (pltfm_data == &sdhci_dwcmshc_bf3_pdata)
+ sdhci_enable_v4_mode(host);
+#endif
+
+ caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ if (caps & SDHCI_CAN_64BIT_V4)
+ sdhci_enable_v4_mode(host);
+
host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
- err = sdhci_add_host(host);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ err = sdhci_setup_host(host);
if (err)
- goto err_clk;
+ goto err_rpm;
+
+ /* Setup Command Queue Engine if enabled */
+ if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
+ priv->vendor_specific_area2 =
+ sdhci_readw(host, DWCMSHC_P_VENDOR_AREA2);
+
+ dwcmshc_cqhci_init(host, pdev, pltfm_data);
+ }
+
+ if (pltfm_data->postinit)
+ pltfm_data->postinit(host, priv);
+
+ err = __sdhci_add_host(host);
+ if (err)
+ goto err_setup_host;
+
+ pm_runtime_put(dev);
return 0;
+err_setup_host:
+ sdhci_cleanup_host(host);
+err_rpm:
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
err_clk:
clk_disable_unprepare(pltfm_host->clk);
clk_disable_unprepare(priv->bus_clk);
- if (rk_priv)
- clk_bulk_disable_unprepare(RK3568_MAX_CLKS,
- rk_priv->rockchip_clks);
-free_pltfm:
- sdhci_pltfm_free(pdev);
+ clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
return err;
}
-static int dwcmshc_remove(struct platform_device *pdev)
+static void dwcmshc_disable_card_clk(struct sdhci_host *host)
+{
+ u16 ctrl;
+
+ ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (ctrl & SDHCI_CLOCK_CARD_EN) {
+ ctrl &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
+ }
+}
+
+static void dwcmshc_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- struct rk3568_priv *rk_priv = priv->priv;
+
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
sdhci_remove_host(host, 0);
+ dwcmshc_disable_card_clk(host);
+
clk_disable_unprepare(pltfm_host->clk);
clk_disable_unprepare(priv->bus_clk);
- if (rk_priv)
- clk_bulk_disable_unprepare(RK3568_MAX_CLKS,
- rk_priv->rockchip_clks);
- sdhci_pltfm_free(pdev);
-
- return 0;
+ clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
}
-#ifdef CONFIG_PM_SLEEP
static int dwcmshc_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- struct rk3568_priv *rk_priv = priv->priv;
int ret;
+ pm_runtime_resume(dev);
+
+ if (host->mmc->caps2 & MMC_CAP2_CQE) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
+
ret = sdhci_suspend_host(host);
if (ret)
return ret;
@@ -471,9 +2112,7 @@ static int dwcmshc_suspend(struct device *dev)
if (!IS_ERR(priv->bus_clk))
clk_disable_unprepare(priv->bus_clk);
- if (rk_priv)
- clk_bulk_disable_unprepare(RK3568_MAX_CLKS,
- rk_priv->rockchip_clks);
+ clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
return ret;
}
@@ -483,7 +2122,6 @@ static int dwcmshc_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- struct rk3568_priv *rk_priv = priv->priv;
int ret;
ret = clk_prepare_enable(pltfm_host->clk);
@@ -493,21 +2131,57 @@ static int dwcmshc_resume(struct device *dev)
if (!IS_ERR(priv->bus_clk)) {
ret = clk_prepare_enable(priv->bus_clk);
if (ret)
- return ret;
+ goto disable_clk;
}
- if (rk_priv) {
- ret = clk_bulk_prepare_enable(RK3568_MAX_CLKS,
- rk_priv->rockchip_clks);
+ ret = clk_bulk_prepare_enable(priv->num_other_clks, priv->other_clks);
+ if (ret)
+ goto disable_bus_clk;
+
+ ret = sdhci_resume_host(host);
+ if (ret)
+ goto disable_other_clks;
+
+ if (host->mmc->caps2 & MMC_CAP2_CQE) {
+ ret = cqhci_resume(host->mmc);
if (ret)
- return ret;
+ goto disable_other_clks;
}
- return sdhci_resume_host(host);
+ return 0;
+
+disable_other_clks:
+ clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
+disable_bus_clk:
+ if (!IS_ERR(priv->bus_clk))
+ clk_disable_unprepare(priv->bus_clk);
+disable_clk:
+ clk_disable_unprepare(pltfm_host->clk);
+ return ret;
+}
+
+static int dwcmshc_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ dwcmshc_disable_card_clk(host);
+
+ return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(dwcmshc_pmops, dwcmshc_suspend, dwcmshc_resume);
+static int dwcmshc_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ dwcmshc_enable_card_clk(host);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dwcmshc_pmops = {
+ SYSTEM_SLEEP_PM_OPS(dwcmshc_suspend, dwcmshc_resume)
+ RUNTIME_PM_OPS(dwcmshc_runtime_suspend, dwcmshc_runtime_resume, NULL)
+};
static struct platform_driver sdhci_dwcmshc_driver = {
.driver = {
@@ -515,10 +2189,10 @@ static struct platform_driver sdhci_dwcmshc_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_dwcmshc_dt_ids,
.acpi_match_table = ACPI_PTR(sdhci_dwcmshc_acpi_ids),
- .pm = &dwcmshc_pmops,
+ .pm = pm_ptr(&dwcmshc_pmops),
},
.probe = dwcmshc_probe,
- .remove = dwcmshc_remove,
+ .remove = dwcmshc_remove,
};
module_platform_driver(sdhci_dwcmshc_driver);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index a593b1fbd69e..8345e2c5a034 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -42,6 +42,12 @@ static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
.max_clk[MMC_TIMING_SD_HS] = 46500000,
};
+static const struct esdhc_clk_fixup ls1043a_esdhc_clk = {
+ .sd_dflt_max_clk = 25000000,
+ .max_clk[MMC_TIMING_UHS_SDR104] = 116700000,
+ .max_clk[MMC_TIMING_MMC_HS200] = 116700000,
+};
+
static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
.sd_dflt_max_clk = 25000000,
.max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
@@ -63,6 +69,7 @@ static const struct esdhc_clk_fixup p1010_esdhc_clk = {
static const struct of_device_id sdhci_esdhc_of_match[] = {
{ .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
+ { .compatible = "fsl,ls1043a-esdhc", .data = &ls1043a_esdhc_clk},
{ .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
{ .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
{ .compatible = "fsl,p1010-esdhc", .data = &p1010_esdhc_clk},
@@ -91,7 +98,7 @@ struct sdhci_esdhc {
};
/**
- * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
+ * esdhc_readl_fixup - Fixup the value read from incompatible eSDHC register
* to make it compatible with SD spec.
*
* @host: pointer to sdhci_host
@@ -126,6 +133,7 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
return ret;
}
}
+
/*
* The DAT[3:0] line signal levels and the CMD line signal level are
* not compatible with standard SDHC register. The line signal levels
@@ -137,6 +145,16 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
ret = value & 0x000fffff;
ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
ret |= (value << 1) & SDHCI_CMD_LVL;
+
+ /*
+ * Some controllers have unreliable Data Line Active
+ * bit for commands with busy signal. This affects
+ * Command Inhibit (data) bit. Just ignore it since
+ * MMC core driver has already polled card status
+	 * with CMD13 after any command with busy signal.
+ */
+ if (esdhc->quirk_ignore_data_inhibit)
+ ret &= ~SDHCI_DATA_INHIBIT;
return ret;
}
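/*
 * Worked example of the remap above, assuming SDHCI_DATA_LVL_MASK covers
 * bits 23:20 and SDHCI_CMD_LVL is bit 24 (as the shifts imply, the eSDHC
 * reports DAT[3:0] in bits 27:24 and CMD in bit 23):
 *
 *   value = 0x0f800005
 *   ret   =  (value & 0x000fffff)                 = 0x00000005
 *         | ((value >> 4) & SDHCI_DATA_LVL_MASK)  = 0x00f00000
 *         | ((value << 1) & SDHCI_CMD_LVL)        = 0x01000000
 *         -> 0x01f00005
 */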
@@ -151,19 +169,6 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
return ret;
}
- /*
- * Some controllers have unreliable Data Line Active
- * bit for commands with busy signal. This affects
- * Command Inhibit (data) bit. Just ignore it since
- * MMC core driver has already polled card status
- * with CMD13 after any command with busy siganl.
- */
- if ((spec_reg == SDHCI_PRESENT_STATE) &&
- (esdhc->quirk_ignore_data_inhibit == true)) {
- ret = value & ~SDHCI_DATA_INHIBIT;
- return ret;
- }
-
ret = value;
return ret;
}
@@ -216,7 +221,7 @@ static u8 esdhc_readb_fixup(struct sdhci_host *host,
}
/**
- * esdhc_write*_fixup - Fixup the SD spec register value so that it could be
+ * esdhc_writel_fixup - Fixup the SD spec register value so that it could be
* written into eSDHC register.
*
* @host: pointer to sdhci_host
@@ -524,12 +529,16 @@ static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
static int esdhc_of_enable_dma(struct sdhci_host *host)
{
+ int ret;
u32 value;
struct device *dev = mmc_dev(host->mmc);
if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
- of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (ret)
+ return ret;
+ }
value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
@@ -900,6 +909,7 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
scfg_node = of_find_matching_node(NULL, scfg_device_ids);
if (scfg_node)
scfg_base = of_iomap(scfg_node, 0);
+ of_node_put(scfg_node);
if (scfg_base) {
sdhciovselcr = SDHCIOVSELCR_TGLEN |
SDHCIOVSELCR_VSELVAL;
@@ -930,7 +940,7 @@ static struct soc_device_attribute soc_tuning_erratum_type1[] = {
{ .family = "QorIQ T1040", },
{ .family = "QorIQ T2080", },
{ .family = "QorIQ LS1021A", },
- { },
+ { /* sentinel */ }
};
static struct soc_device_attribute soc_tuning_erratum_type2[] = {
@@ -940,7 +950,7 @@ static struct soc_device_attribute soc_tuning_erratum_type2[] = {
{ .family = "QorIQ LS1080A", },
{ .family = "QorIQ LS2080A", },
{ .family = "QorIQ LA1575A", },
- { },
+ { /* sentinel */ }
};
static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
@@ -1224,7 +1234,6 @@ static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
return intmask;
}
-#ifdef CONFIG_PM_SLEEP
static u32 esdhc_proctl;
static int esdhc_of_suspend(struct device *dev)
{
@@ -1250,11 +1259,8 @@ static int esdhc_of_resume(struct device *dev)
}
return ret;
}
-#endif
-static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
- esdhc_of_suspend,
- esdhc_of_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops, esdhc_of_suspend, esdhc_of_resume);
static const struct sdhci_ops sdhci_esdhc_be_ops = {
.read_l = esdhc_be_readl,
@@ -1312,21 +1318,21 @@ static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
static struct soc_device_attribute soc_incorrect_hostver[] = {
{ .family = "QorIQ T4240", .revision = "1.0", },
{ .family = "QorIQ T4240", .revision = "2.0", },
- { },
+ { /* sentinel */ }
};
static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
{ .family = "QorIQ LX2160A", .revision = "1.0", },
{ .family = "QorIQ LX2160A", .revision = "2.0", },
{ .family = "QorIQ LS1028A", .revision = "1.0", },
- { },
+ { /* sentinel */ }
};
static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
{ .family = "QorIQ LX2160A", .revision = "1.0", },
{ .family = "QorIQ LX2160A", .revision = "2.0", },
{ .family = "QorIQ LS1028A", .revision = "1.0", },
- { },
+ { /* sentinel */ }
};
static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
@@ -1414,7 +1420,7 @@ static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
static int sdhci_esdhc_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
- struct device_node *np;
+ struct device_node *np, *tp;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_esdhc *esdhc;
int ret;
@@ -1459,7 +1465,9 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
if (esdhc->vendor_ver > VENDOR_V_22)
host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
- if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
+ tp = of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc");
+ if (tp) {
+ of_node_put(tp);
host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
}
@@ -1487,18 +1495,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
/* call to generic mmc_of_parse to support additional capabilities */
ret = mmc_of_parse(host->mmc);
if (ret)
- goto err;
+ return ret;
mmc_of_parse_voltage(host->mmc, &host->ocr_mask);
- ret = sdhci_add_host(host);
- if (ret)
- goto err;
-
- return 0;
- err:
- sdhci_pltfm_free(pdev);
- return ret;
+ return sdhci_add_host(host);
}
static struct platform_driver sdhci_esdhc_driver = {
@@ -1506,10 +1507,10 @@ static struct platform_driver sdhci_esdhc_driver = {
.name = "sdhci-esdhc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_esdhc_of_match,
- .pm = &esdhc_of_dev_pm_ops,
+ .pm = pm_sleep_ptr(&esdhc_of_dev_pm_ops),
},
.probe = sdhci_esdhc_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_esdhc_driver);
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 12675797b296..5bb845d13599 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -68,7 +68,7 @@ static const struct sdhci_pltfm_data sdhci_hlwd_pdata = {
static int sdhci_hlwd_probe(struct platform_device *pdev)
{
- return sdhci_pltfm_register(pdev, &sdhci_hlwd_pdata, 0);
+ return sdhci_pltfm_init_and_add_host(pdev, &sdhci_hlwd_pdata, 0);
}
static const struct of_device_id sdhci_hlwd_of_match[] = {
@@ -85,7 +85,7 @@ static struct platform_driver sdhci_hlwd_driver = {
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_hlwd_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_hlwd_driver);
diff --git a/drivers/mmc/host/sdhci-of-k1.c b/drivers/mmc/host/sdhci-of-k1.c
new file mode 100644
index 000000000000..0cc97e23a2f9
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-k1.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
+ * Copyright (c) 2025 Yixun Lan <dlan@gentoo.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/init.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+#define SPACEMIT_SDHC_MMC_CTRL_REG 0x114
+#define SDHC_MISC_INT_EN BIT(1)
+#define SDHC_MISC_INT BIT(2)
+#define SDHC_ENHANCE_STROBE_EN BIT(8)
+#define SDHC_MMC_HS400 BIT(9)
+#define SDHC_MMC_HS200 BIT(10)
+#define SDHC_MMC_CARD_MODE BIT(12)
+
+#define SPACEMIT_SDHC_TX_CFG_REG 0x11C
+#define SDHC_TX_INT_CLK_SEL BIT(30)
+#define SDHC_TX_MUX_SEL BIT(31)
+
+#define SPACEMIT_SDHC_PHY_CTRL_REG 0x160
+#define SDHC_PHY_FUNC_EN BIT(0)
+#define SDHC_PHY_PLL_LOCK BIT(1)
+#define SDHC_HOST_LEGACY_MODE BIT(31)
+
+#define SPACEMIT_SDHC_PHY_FUNC_REG 0x164
+#define SDHC_PHY_TEST_EN BIT(7)
+#define SDHC_HS200_USE_RFIFO BIT(15)
+
+#define SPACEMIT_SDHC_PHY_DLLCFG 0x168
+#define SDHC_DLL_PREDLY_NUM GENMASK(3, 2)
+#define SDHC_DLL_FULLDLY_RANGE GENMASK(5, 4)
+#define SDHC_DLL_VREG_CTRL GENMASK(7, 6)
+#define SDHC_DLL_ENABLE BIT(31)
+
+#define SPACEMIT_SDHC_PHY_DLLCFG1 0x16C
+#define SDHC_DLL_REG1_CTRL GENMASK(7, 0)
+#define SDHC_DLL_REG2_CTRL GENMASK(15, 8)
+#define SDHC_DLL_REG3_CTRL GENMASK(23, 16)
+#define SDHC_DLL_REG4_CTRL GENMASK(31, 24)
+
+#define SPACEMIT_SDHC_PHY_DLLSTS 0x170
+#define SDHC_DLL_LOCK_STATE BIT(0)
+
+#define SPACEMIT_SDHC_PHY_PADCFG_REG 0x178
+#define SDHC_PHY_DRIVE_SEL GENMASK(2, 0)
+#define SDHC_RX_BIAS_CTRL BIT(5)
+
+struct spacemit_sdhci_host {
+ struct clk *clk_core;
+ struct clk *clk_io;
+};
+
+/* All helpers update the requested clr/set bits while preserving the rest */
+static inline void spacemit_sdhci_setbits(struct sdhci_host *host, u32 val, int reg)
+{
+ sdhci_writel(host, sdhci_readl(host, reg) | val, reg);
+}
+
+static inline void spacemit_sdhci_clrbits(struct sdhci_host *host, u32 val, int reg)
+{
+ sdhci_writel(host, sdhci_readl(host, reg) & ~val, reg);
+}
+
+static inline void spacemit_sdhci_clrsetbits(struct sdhci_host *host, u32 clr, u32 set, int reg)
+{
+ u32 val = sdhci_readl(host, reg);
+
+ val = (val & ~clr) | set;
+ sdhci_writel(host, val, reg);
+}
+
+static void spacemit_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ sdhci_reset(host, mask);
+
+ if (mask != SDHCI_RESET_ALL)
+ return;
+
+ spacemit_sdhci_setbits(host, SDHC_PHY_FUNC_EN | SDHC_PHY_PLL_LOCK,
+ SPACEMIT_SDHC_PHY_CTRL_REG);
+
+ spacemit_sdhci_clrsetbits(host, SDHC_PHY_DRIVE_SEL,
+ SDHC_RX_BIAS_CTRL | FIELD_PREP(SDHC_PHY_DRIVE_SEL, 4),
+ SPACEMIT_SDHC_PHY_PADCFG_REG);
+
+ if (!(host->mmc->caps2 & MMC_CAP2_NO_MMC))
+ spacemit_sdhci_setbits(host, SDHC_MMC_CARD_MODE, SPACEMIT_SDHC_MMC_CTRL_REG);
+}
+
+static void spacemit_sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
+{
+ if (timing == MMC_TIMING_MMC_HS200)
+ spacemit_sdhci_setbits(host, SDHC_MMC_HS200, SPACEMIT_SDHC_MMC_CTRL_REG);
+
+ if (timing == MMC_TIMING_MMC_HS400)
+ spacemit_sdhci_setbits(host, SDHC_MMC_HS400, SPACEMIT_SDHC_MMC_CTRL_REG);
+
+ sdhci_set_uhs_signaling(host, timing);
+
+ if (!(host->mmc->caps2 & MMC_CAP2_NO_SDIO))
+ spacemit_sdhci_setbits(host, SDHCI_CTRL_VDD_180, SDHCI_HOST_CONTROL2);
+}
+
+static void spacemit_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ if (mmc->ios.timing <= MMC_TIMING_UHS_SDR50)
+ spacemit_sdhci_setbits(host, SDHC_TX_INT_CLK_SEL, SPACEMIT_SDHC_TX_CFG_REG);
+ else
+ spacemit_sdhci_clrbits(host, SDHC_TX_INT_CLK_SEL, SPACEMIT_SDHC_TX_CFG_REG);
+
+ sdhci_set_clock(host, clock);
+};
+
+static void spacemit_sdhci_phy_dll_init(struct sdhci_host *host)
+{
+ u32 state;
+ int ret;
+
+ spacemit_sdhci_clrsetbits(host, SDHC_DLL_PREDLY_NUM |
+ SDHC_DLL_FULLDLY_RANGE |
+ SDHC_DLL_VREG_CTRL,
+ FIELD_PREP(SDHC_DLL_PREDLY_NUM, 1) |
+ FIELD_PREP(SDHC_DLL_FULLDLY_RANGE, 1) |
+ FIELD_PREP(SDHC_DLL_VREG_CTRL, 1),
+ SPACEMIT_SDHC_PHY_DLLCFG);
+
+ spacemit_sdhci_clrsetbits(host, SDHC_DLL_REG1_CTRL,
+ FIELD_PREP(SDHC_DLL_REG1_CTRL, 0x92),
+ SPACEMIT_SDHC_PHY_DLLCFG1);
+
+ spacemit_sdhci_setbits(host, SDHC_DLL_ENABLE, SPACEMIT_SDHC_PHY_DLLCFG);
+
+ ret = readl_poll_timeout(host->ioaddr + SPACEMIT_SDHC_PHY_DLLSTS, state,
+ state & SDHC_DLL_LOCK_STATE, 2, 100);
+ if (ret == -ETIMEDOUT)
+		dev_warn(mmc_dev(host->mmc), "failed to lock phy dll in 100us!\n");
+}
+
+static void spacemit_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (!ios->enhanced_strobe) {
+ spacemit_sdhci_clrbits(host, SDHC_ENHANCE_STROBE_EN, SPACEMIT_SDHC_MMC_CTRL_REG);
+ return;
+ }
+
+ spacemit_sdhci_setbits(host, SDHC_ENHANCE_STROBE_EN, SPACEMIT_SDHC_MMC_CTRL_REG);
+ spacemit_sdhci_phy_dll_init(host);
+}
+
+static unsigned int spacemit_sdhci_clk_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_get_rate(pltfm_host->clk);
+}
+
+static int spacemit_sdhci_pre_select_hs400(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ spacemit_sdhci_setbits(host, SDHC_MMC_HS400, SPACEMIT_SDHC_MMC_CTRL_REG);
+
+ return 0;
+}
+
+static void spacemit_sdhci_post_select_hs400(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ spacemit_sdhci_phy_dll_init(host);
+}
+
+static void spacemit_sdhci_pre_hs400_to_hs200(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ spacemit_sdhci_clrbits(host, SDHC_PHY_FUNC_EN | SDHC_PHY_PLL_LOCK,
+ SPACEMIT_SDHC_PHY_CTRL_REG);
+ spacemit_sdhci_clrbits(host, SDHC_MMC_HS400 | SDHC_MMC_HS200 | SDHC_ENHANCE_STROBE_EN,
+ SPACEMIT_SDHC_MMC_CTRL_REG);
+ spacemit_sdhci_clrbits(host, SDHC_HS200_USE_RFIFO, SPACEMIT_SDHC_PHY_FUNC_REG);
+
+ udelay(5);
+
+ spacemit_sdhci_setbits(host, SDHC_PHY_FUNC_EN | SDHC_PHY_PLL_LOCK,
+ SPACEMIT_SDHC_PHY_CTRL_REG);
+}
+
+static inline int spacemit_sdhci_get_clocks(struct device *dev,
+ struct sdhci_pltfm_host *pltfm_host)
+{
+ struct spacemit_sdhci_host *sdhst = sdhci_pltfm_priv(pltfm_host);
+
+ sdhst->clk_core = devm_clk_get_enabled(dev, "core");
+ if (IS_ERR(sdhst->clk_core))
+ return -EINVAL;
+
+ sdhst->clk_io = devm_clk_get_enabled(dev, "io");
+ if (IS_ERR(sdhst->clk_io))
+ return -EINVAL;
+
+ pltfm_host->clk = sdhst->clk_io;
+
+ return 0;
+}
+
+static const struct sdhci_ops spacemit_sdhci_ops = {
+ .get_max_clock = spacemit_sdhci_clk_get_max_clock,
+ .reset = spacemit_sdhci_reset,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_clock = spacemit_sdhci_set_clock,
+ .set_uhs_signaling = spacemit_sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data spacemit_sdhci_k1_pdata = {
+ .ops = &spacemit_sdhci_ops,
+ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+};
+
+static const struct of_device_id spacemit_sdhci_of_match[] = {
+ { .compatible = "spacemit,k1-sdhci" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, spacemit_sdhci_of_match);
+
+static int spacemit_sdhci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spacemit_sdhci_host *sdhst;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_host *host;
+ struct mmc_host_ops *mops;
+ int ret;
+
+ host = sdhci_pltfm_init(pdev, &spacemit_sdhci_k1_pdata, sizeof(*sdhst));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err_pltfm;
+
+ sdhci_get_of_property(pdev);
+
+ if (!(host->mmc->caps2 & MMC_CAP2_NO_MMC)) {
+ mops = &host->mmc_host_ops;
+ mops->hs400_prepare_ddr = spacemit_sdhci_pre_select_hs400;
+ mops->hs400_complete = spacemit_sdhci_post_select_hs400;
+ mops->hs400_downgrade = spacemit_sdhci_pre_hs400_to_hs200;
+ mops->hs400_enhanced_strobe = spacemit_sdhci_hs400_enhanced_strobe;
+ }
+
+ host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+
+ ret = spacemit_sdhci_get_clocks(dev, pltfm_host);
+ if (ret)
+ goto err_pltfm;
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_pltfm;
+
+ return 0;
+
+err_pltfm:
+ return ret;
+}
+
+static struct platform_driver spacemit_sdhci_driver = {
+ .driver = {
+ .name = "sdhci-spacemit",
+ .of_match_table = spacemit_sdhci_of_match,
+ },
+ .probe = spacemit_sdhci_probe,
+ .remove = sdhci_pltfm_remove,
+};
+module_platform_driver(spacemit_sdhci_driver);
+
+MODULE_DESCRIPTION("SpacemiT SDHCI platform driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci-of-ma35d1.c b/drivers/mmc/host/sdhci-of-ma35d1.c
new file mode 100644
index 000000000000..287026422616
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-ma35d1.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2024 Nuvoton Technology Corp.
+ *
+ * Author: Shan-Chun Hung <shanchun1218@gmail.com>
+ */
+
+#include <linux/align.h>
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/build_bug.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/math.h>
+#include <linux/mfd/syscon.h>
+#include <linux/minmax.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include "sdhci-pltfm.h"
+#include "sdhci.h"
+
+#define MA35_SYS_MISCFCR0 0x070
+#define MA35_SDHCI_MSHCCTL 0x508
+#define MA35_SDHCI_MBIUCTL 0x510
+
+#define MA35_SDHCI_CMD_CONFLICT_CHK BIT(0)
+#define MA35_SDHCI_INCR_MSK GENMASK(3, 0)
+#define MA35_SDHCI_INCR16 BIT(3)
+#define MA35_SDHCI_INCR8 BIT(2)
+
+struct ma35_priv {
+ struct reset_control *rst;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_uhs;
+ struct pinctrl_state *pins_default;
+};
+
+struct ma35_restore_data {
+ u32 reg;
+ u32 width;
+};
+
+static const struct ma35_restore_data restore_data[] = {
+ { SDHCI_CLOCK_CONTROL, sizeof(u32)},
+ { SDHCI_BLOCK_SIZE, sizeof(u32)},
+ { SDHCI_INT_ENABLE, sizeof(u32)},
+ { SDHCI_SIGNAL_ENABLE, sizeof(u32)},
+ { SDHCI_AUTO_CMD_STATUS, sizeof(u32)},
+ { SDHCI_HOST_CONTROL, sizeof(u32)},
+ { SDHCI_TIMEOUT_CONTROL, sizeof(u8) },
+ { MA35_SDHCI_MSHCCTL, sizeof(u16)},
+ { MA35_SDHCI_MBIUCTL, sizeof(u16)},
+};
+
+/*
+ * If the DMA address range spans a 128MB boundary, we split the transfer
+ * in two so that neither part crosses the boundary.
+ */
+static void ma35_adma_write_desc(struct sdhci_host *host, void **desc, dma_addr_t addr, int len,
+ unsigned int cmd)
+{
+ int tmplen, offset;
+
+ if (likely(!len || (ALIGN(addr, SZ_128M) == ALIGN(addr + len - 1, SZ_128M)))) {
+ sdhci_adma_write_desc(host, desc, addr, len, cmd);
+ return;
+ }
+
+ offset = addr & (SZ_128M - 1);
+ tmplen = SZ_128M - offset;
+ sdhci_adma_write_desc(host, desc, addr, tmplen, cmd);
+
+ addr += tmplen;
+ len -= tmplen;
+ sdhci_adma_write_desc(host, desc, addr, len, cmd);
+}
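
/*
 * A minimal worked example of the split above (SZ_128M = 0x08000000),
 * with hypothetical values:
 *
 *   addr = 0x07fff000, len = 0x3000
 *   ALIGN(addr, SZ_128M)           = 0x08000000
 *   ALIGN(addr + len - 1, SZ_128M) = 0x10000000  -> boundary is crossed
 *   offset = addr & (SZ_128M - 1)  = 0x07fff000
 *   tmplen = SZ_128M - offset      = 0x1000
 *   descriptor 1: addr = 0x07fff000, len = 0x1000
 *   descriptor 2: addr = 0x08000000, len = 0x2000
 */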
+
+static void ma35_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u32 ctl;
+
+ /*
+ * If the clock frequency exceeds MMC_HIGH_52_MAX_DTR,
+ * disable command conflict check.
+ */
+ ctl = sdhci_readw(host, MA35_SDHCI_MSHCCTL);
+ if (clock > MMC_HIGH_52_MAX_DTR)
+ ctl &= ~MA35_SDHCI_CMD_CONFLICT_CHK;
+ else
+ ctl |= MA35_SDHCI_CMD_CONFLICT_CHK;
+ sdhci_writew(host, ctl, MA35_SDHCI_MSHCCTL);
+
+ sdhci_set_clock(host, clock);
+}
+
+static int ma35_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct ma35_priv *priv = sdhci_pltfm_priv(pltfm_host);
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_180:
+ if (!IS_ERR(priv->pinctrl) && !IS_ERR(priv->pins_uhs))
+ pinctrl_select_state(priv->pinctrl, priv->pins_uhs);
+ break;
+ case MMC_SIGNAL_VOLTAGE_330:
+ if (!IS_ERR(priv->pinctrl) && !IS_ERR(priv->pins_default))
+ pinctrl_select_state(priv->pinctrl, priv->pins_default);
+ break;
+ default:
+ dev_err(mmc_dev(host->mmc), "Unsupported signal voltage!\n");
+ return -EINVAL;
+ }
+
+ return sdhci_start_signal_voltage_switch(mmc, ios);
+}
+
+static void ma35_voltage_switch(struct sdhci_host *host)
+{
+	/* Wait for 5 ms after setting the 1.8V signal enable bit */
+ fsleep(5000);
+}
+
+static int ma35_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct ma35_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ int idx;
+ u32 regs[ARRAY_SIZE(restore_data)] = {};
+
+ /*
+	 * Controller limitations require a reset of the SD/eMMC interface
+	 * before tuning, so save the relevant registers beforehand and
+	 * restore them after the reset.
+ */
+ for (idx = 0; idx < ARRAY_SIZE(restore_data); idx++) {
+ if (restore_data[idx].width == sizeof(u32))
+ regs[idx] = sdhci_readl(host, restore_data[idx].reg);
+ else if (restore_data[idx].width == sizeof(u16))
+ regs[idx] = sdhci_readw(host, restore_data[idx].reg);
+ else if (restore_data[idx].width == sizeof(u8))
+ regs[idx] = sdhci_readb(host, restore_data[idx].reg);
+ }
+
+ reset_control_assert(priv->rst);
+ reset_control_deassert(priv->rst);
+
+ for (idx = 0; idx < ARRAY_SIZE(restore_data); idx++) {
+ if (restore_data[idx].width == sizeof(u32))
+ sdhci_writel(host, regs[idx], restore_data[idx].reg);
+ else if (restore_data[idx].width == sizeof(u16))
+ sdhci_writew(host, regs[idx], restore_data[idx].reg);
+ else if (restore_data[idx].width == sizeof(u8))
+ sdhci_writeb(host, regs[idx], restore_data[idx].reg);
+ }
+
+ return sdhci_execute_tuning(mmc, opcode);
+}
+
+static const struct sdhci_ops sdhci_ma35_ops = {
+ .set_clock = ma35_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .reset = sdhci_reset,
+ .adma_write_desc = ma35_adma_write_desc,
+ .voltage_switch = ma35_voltage_switch,
+};
+
+static const struct sdhci_pltfm_data sdhci_ma35_pdata = {
+ .ops = &sdhci_ma35_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_BROKEN_DDR50 |
+ SDHCI_QUIRK2_ACMD23_BROKEN,
+};
+
+static int ma35_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_host *host;
+ struct ma35_priv *priv;
+ int err;
+ u32 extra, ctl;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_ma35_pdata, sizeof(struct ma35_priv));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+	/* Extra ADMA table entries for transfers that cross a 128M boundary. */
+ extra = DIV_ROUND_UP_ULL(dma_get_required_mask(dev), SZ_128M);
+ extra = min(extra, SDHCI_MAX_SEGS);
+
+ host->adma_table_cnt += extra;
+ pltfm_host = sdhci_priv(host);
+ priv = sdhci_pltfm_priv(pltfm_host);
+
+ pltfm_host->clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(pltfm_host->clk))
+ return dev_err_probe(dev, PTR_ERR(pltfm_host->clk),
+ "failed to get clk\n");
+
+ err = mmc_of_parse(host->mmc);
+ if (err)
+ return err;
+
+ priv->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->rst))
+ return dev_err_probe(dev, PTR_ERR(priv->rst),
+ "failed to get reset control\n");
+
+ sdhci_get_of_property(pdev);
+
+ priv->pinctrl = devm_pinctrl_get(dev);
+ if (!IS_ERR(priv->pinctrl)) {
+ priv->pins_default = pinctrl_lookup_state(priv->pinctrl, "default");
+ priv->pins_uhs = pinctrl_lookup_state(priv->pinctrl, "state_uhs");
+ pinctrl_select_state(priv->pinctrl, priv->pins_default);
+ }
+
+ if (!(host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)) {
+ struct regmap *regmap;
+ u32 reg;
+
+ regmap = syscon_regmap_lookup_by_phandle(dev_of_node(dev), "nuvoton,sys");
+ if (!IS_ERR(regmap)) {
+ /* Enable SDHCI voltage stable for 1.8V */
+ regmap_read(regmap, MA35_SYS_MISCFCR0, &reg);
+ reg |= BIT(17);
+ regmap_write(regmap, MA35_SYS_MISCFCR0, reg);
+ }
+
+ host->mmc_host_ops.start_signal_voltage_switch =
+ ma35_start_signal_voltage_switch;
+ }
+
+ host->mmc_host_ops.execute_tuning = ma35_execute_tuning;
+
+ err = sdhci_add_host(host);
+ if (err)
+ return err;
+
+ /*
+ * Split data into chunks of 16 or 8 bytes for transmission.
+ * Each chunk transfer is guaranteed to be uninterrupted on the bus.
+ * This likely corresponds to the AHB bus DMA burst size.
+ */
+ ctl = sdhci_readw(host, MA35_SDHCI_MBIUCTL);
+ ctl &= ~MA35_SDHCI_INCR_MSK;
+ ctl |= MA35_SDHCI_INCR16 | MA35_SDHCI_INCR8;
+ sdhci_writew(host, ctl, MA35_SDHCI_MBIUCTL);
+
+ return 0;
+}
+
+static void ma35_disable_card_clk(struct sdhci_host *host)
+{
+ u16 ctrl;
+
+ ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (ctrl & SDHCI_CLOCK_CARD_EN) {
+ ctrl &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
+ }
+}
+
+static void ma35_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+
+ sdhci_remove_host(host, 0);
+ ma35_disable_card_clk(host);
+}
+
+static const struct of_device_id sdhci_ma35_dt_ids[] = {
+ { .compatible = "nuvoton,ma35d1-sdhci" },
+ {}
+};
+
+static struct platform_driver sdhci_ma35_driver = {
+ .driver = {
+ .name = "sdhci-ma35",
+ .of_match_table = sdhci_ma35_dt_ids,
+ },
+ .probe = ma35_probe,
+ .remove = ma35_remove,
+};
+module_platform_driver(sdhci_ma35_driver);
+
+MODULE_DESCRIPTION("SDHCI platform driver for Nuvoton MA35");
+MODULE_AUTHOR("Shan-Chun Hung <shanchun1218@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci-of-sparx5.c b/drivers/mmc/host/sdhci-of-sparx5.c
index 28e4ee69e100..b3db1e2c4c0e 100644
--- a/drivers/mmc/host/sdhci-of-sparx5.c
+++ b/drivers/mmc/host/sdhci-of-sparx5.c
@@ -13,9 +13,9 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/regmap.h>
-#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/dma-mapping.h>
+#include <linux/of.h>
#include "sdhci-pltfm.h"
@@ -184,15 +184,10 @@ static int sdhci_sparx5_probe(struct platform_device *pdev)
sdhci_sparx5 = sdhci_pltfm_priv(pltfm_host);
sdhci_sparx5->host = host;
- pltfm_host->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(pltfm_host->clk)) {
- ret = PTR_ERR(pltfm_host->clk);
- dev_err(&pdev->dev, "failed to get core clk: %d\n", ret);
- goto free_pltfm;
- }
- ret = clk_prepare_enable(pltfm_host->clk);
- if (ret)
- goto free_pltfm;
+ pltfm_host->clk = devm_clk_get_enabled(&pdev->dev, "core");
+ if (IS_ERR(pltfm_host->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pltfm_host->clk),
+ "failed to get and enable core clk\n");
if (!of_property_read_u32(np, "microchip,clock-delay", &value) &&
(value > 0 && value <= MSHC_DLY_CC_MAX))
@@ -202,14 +197,12 @@ static int sdhci_sparx5_probe(struct platform_device *pdev)
ret = mmc_of_parse(host->mmc);
if (ret)
- goto err_clk;
+ return ret;
sdhci_sparx5->cpu_ctrl = syscon_regmap_lookup_by_compatible(syscon);
- if (IS_ERR(sdhci_sparx5->cpu_ctrl)) {
- dev_err(&pdev->dev, "No CPU syscon regmap !\n");
- ret = PTR_ERR(sdhci_sparx5->cpu_ctrl);
- goto err_clk;
- }
+ if (IS_ERR(sdhci_sparx5->cpu_ctrl))
+ return dev_err_probe(&pdev->dev, PTR_ERR(sdhci_sparx5->cpu_ctrl),
+ "No CPU syscon regmap !\n");
if (sdhci_sparx5->delay_clock >= 0)
sparx5_set_delay(host, sdhci_sparx5->delay_clock);
@@ -225,7 +218,7 @@ static int sdhci_sparx5_probe(struct platform_device *pdev)
ret = sdhci_add_host(host);
if (ret)
- goto err_clk;
+ return ret;
/* Set AXI bus master to use un-cached access (for DMA) */
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA) &&
@@ -238,12 +231,6 @@ static int sdhci_sparx5_probe(struct platform_device *pdev)
mmc_hostname(host->mmc), sdhci_readl(host, MSHC2_TYPE));
return ret;
-
-err_clk:
- clk_disable_unprepare(pltfm_host->clk);
-free_pltfm:
- sdhci_pltfm_free(pdev);
- return ret;
}
static const struct of_device_id sdhci_sparx5_of_match[] = {
@@ -260,7 +247,7 @@ static struct platform_driver sdhci_sparx5_driver = {
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_sparx5_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_sparx5_driver);
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 8f4d1f003f65..b5d7c1a80a92 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* SDHCI Controller driver for TI's OMAP SoCs
*
* Copyright (C) 2017 Texas Instruments
@@ -11,9 +11,10 @@
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sys_soc.h>
@@ -21,7 +22,14 @@
#include "sdhci-pltfm.h"
-#define SDHCI_OMAP_CON 0x12c
+/*
+ * Note that the register offsets used here are from omap_regs
+ * base which is 0x100 for omap4 and later, and 0 for omap3 and
+ * earlier.
+ */
+#define SDHCI_OMAP_SYSCONFIG 0x10
+
+#define SDHCI_OMAP_CON 0x2c
#define CON_DW8 BIT(5)
#define CON_DMA_MASTER BIT(20)
#define CON_DDR BIT(19)
@@ -31,20 +39,20 @@
#define CON_INIT BIT(1)
#define CON_OD BIT(0)
-#define SDHCI_OMAP_DLL 0x0134
+#define SDHCI_OMAP_DLL 0x34
#define DLL_SWT BIT(20)
#define DLL_FORCE_SR_C_SHIFT 13
#define DLL_FORCE_SR_C_MASK (0x7f << DLL_FORCE_SR_C_SHIFT)
#define DLL_FORCE_VALUE BIT(12)
#define DLL_CALIB BIT(1)
-#define SDHCI_OMAP_CMD 0x20c
+#define SDHCI_OMAP_CMD 0x10c
-#define SDHCI_OMAP_PSTATE 0x0224
+#define SDHCI_OMAP_PSTATE 0x124
#define PSTATE_DLEV_DAT0 BIT(20)
#define PSTATE_DATI BIT(1)
-#define SDHCI_OMAP_HCTL 0x228
+#define SDHCI_OMAP_HCTL 0x128
#define HCTL_SDBP BIT(8)
#define HCTL_SDVS_SHIFT 9
#define HCTL_SDVS_MASK (0x7 << HCTL_SDVS_SHIFT)
@@ -52,26 +60,28 @@
#define HCTL_SDVS_30 (0x6 << HCTL_SDVS_SHIFT)
#define HCTL_SDVS_18 (0x5 << HCTL_SDVS_SHIFT)
-#define SDHCI_OMAP_SYSCTL 0x22c
+#define SDHCI_OMAP_SYSCTL 0x12c
#define SYSCTL_CEN BIT(2)
#define SYSCTL_CLKD_SHIFT 6
#define SYSCTL_CLKD_MASK 0x3ff
-#define SDHCI_OMAP_STAT 0x230
+#define SDHCI_OMAP_STAT 0x130
-#define SDHCI_OMAP_IE 0x234
+#define SDHCI_OMAP_IE 0x134
#define INT_CC_EN BIT(0)
-#define SDHCI_OMAP_AC12 0x23c
+#define SDHCI_OMAP_ISE 0x138
+
+#define SDHCI_OMAP_AC12 0x13c
#define AC12_V1V8_SIGEN BIT(19)
#define AC12_SCLK_SEL BIT(23)
-#define SDHCI_OMAP_CAPA 0x240
+#define SDHCI_OMAP_CAPA 0x140
#define CAPA_VS33 BIT(24)
#define CAPA_VS30 BIT(25)
#define CAPA_VS18 BIT(26)
-#define SDHCI_OMAP_CAPA2 0x0244
+#define SDHCI_OMAP_CAPA2 0x144
#define CAPA2_TSDR50 BIT(13)
#define SDHCI_OMAP_TIMEOUT 1 /* 1 msec */
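/*
 * With these relative defines the effective MMIO offset is
 * omap_offset + define. For example, on omap4 and later (omap_offset = 0x100)
 * SDHCI_OMAP_CON resolves to the former absolute 0x12c and SDHCI_OMAP_HCTL to
 * 0x228, while omap2/3 (omap_offset = 0) address 0x2c and 0x128 directly.
 */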
@@ -89,7 +99,8 @@
#define SDHCI_OMAP_SPECIAL_RESET BIT(1)
struct sdhci_omap_data {
- u32 offset;
+ int omap_offset; /* Offset for omap regs from base */
+ u32 offset; /* Offset for SDHCI regs from base */
u8 flags;
};
@@ -107,12 +118,19 @@ struct sdhci_omap_host {
struct pinctrl *pinctrl;
struct pinctrl_state **pinctrl_state;
+ int wakeirq;
bool is_tuning;
+
+ /* Offset for omap specific registers from base */
+ int omap_offset;
+
/* Omap specific context save */
u32 con;
u32 hctl;
u32 sysctl;
u32 capa;
+ u32 ie;
+ u32 ise;
};
static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host);
@@ -121,13 +139,13 @@ static void sdhci_omap_stop_clock(struct sdhci_omap_host *omap_host);
static inline u32 sdhci_omap_readl(struct sdhci_omap_host *host,
unsigned int offset)
{
- return readl(host->base + offset);
+ return readl(host->base + host->omap_offset + offset);
}
static inline void sdhci_omap_writel(struct sdhci_omap_host *host,
unsigned int offset, u32 data)
{
- writel(data, host->base + offset);
+ writel(data, host->base + host->omap_offset + offset);
}
static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host,
@@ -172,7 +190,7 @@ static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host,
}
static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
- unsigned int iov)
+ unsigned int iov_pbias)
{
int ret;
struct sdhci_host *host = omap_host->host;
@@ -183,14 +201,15 @@ static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
return ret;
if (!IS_ERR(mmc->supply.vqmmc)) {
- ret = regulator_set_voltage(mmc->supply.vqmmc, iov, iov);
- if (ret) {
+ /* Pick the right voltage to allow 3.0V for 3.3V nominal PBIAS */
+ ret = mmc_regulator_set_vqmmc(mmc, &mmc->ios);
+ if (ret < 0) {
dev_err(mmc_dev(mmc), "vqmmc set voltage failed\n");
return ret;
}
}
- ret = sdhci_omap_set_pbias(omap_host, true, iov);
+ ret = sdhci_omap_set_pbias(omap_host, true, iov_pbias);
if (ret)
return ret;
@@ -200,16 +219,28 @@ static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host,
unsigned char signal_voltage)
{
- u32 reg;
+ u32 reg, capa;
ktime_t timeout;
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
reg &= ~HCTL_SDVS_MASK;
- if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
- reg |= HCTL_SDVS_33;
- else
+ switch (signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ capa = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ if (capa & CAPA_VS33)
+ reg |= HCTL_SDVS_33;
+ else if (capa & CAPA_VS30)
+ reg |= HCTL_SDVS_30;
+ else
+ dev_warn(omap_host->dev, "misconfigured CAPA: %08x\n",
+ capa);
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ default:
reg |= HCTL_SDVS_18;
+ break;
+ }
sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg);
@@ -338,7 +369,7 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
/*
* Stage 1: Search for a maximum pass window ignoring any
- * any single point failures. If the tuning value ends up
+ * single point failures. If the tuning value ends up
* near it, move away from it in stage 2 below
*/
while (phase_delay <= MAX_PHASE_DELAY) {
@@ -527,28 +558,32 @@ static int sdhci_omap_start_signal_voltage_switch(struct mmc_host *mmc,
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
- if (!(reg & CAPA_VS33))
+ if (!(reg & (CAPA_VS30 | CAPA_VS33)))
return -EOPNOTSUPP;
+ if (reg & CAPA_VS30)
+ iov = IOV_3V0;
+ else
+ iov = IOV_3V3;
+
sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
reg &= ~AC12_V1V8_SIGEN;
sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
- iov = IOV_3V3;
} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
if (!(reg & CAPA_VS18))
return -EOPNOTSUPP;
+ iov = IOV_1V8;
+
sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
reg |= AC12_V1V8_SIGEN;
sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
-
- iov = IOV_1V8;
} else {
return -EOPNOTSUPP;
}
@@ -682,7 +717,24 @@ static void sdhci_omap_set_power(struct sdhci_host *host, unsigned char mode,
{
struct mmc_host *mmc = host->mmc;
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+}
+
+/*
+ * MMCHS_HL_HWINFO has the MADMA_EN bit set if the controller instance
+ * is connected to L3 interconnect and is bus master capable. Note that
+ * the MMCHS_HL_HWINFO register is in the module registers before the
+ * omap registers and sdhci registers. The offset can vary for omap
+ * registers depending on the SoC. Do not use sdhci_omap_readl() here.
+ */
+static bool sdhci_omap_has_adma(struct sdhci_omap_host *omap_host, int offset)
+{
+ /* MMCHS_HL_HWINFO register is only available on omap4 and later */
+ if (offset < 0x200)
+ return false;
+
+ return readl(omap_host->base + 4) & 1;
}
static int sdhci_omap_enable_dma(struct sdhci_host *host)
@@ -792,6 +844,11 @@ static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
unsigned long limit = MMC_TIMEOUT_US;
unsigned long i = 0;
+ u32 sysc;
+
+ /* Save target module sysconfig configured by SoC PM layer */
+ if (mask & SDHCI_RESET_ALL)
+ sysc = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCONFIG);
/* Don't reset data lines during tuning operation */
if (omap_host->is_tuning)
@@ -811,10 +868,15 @@ static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
dev_err(mmc_dev(host->mmc),
"Timeout waiting on controller reset in %s\n",
__func__);
- return;
+
+ goto restore_sysc;
}
sdhci_reset(host, mask);
+
+restore_sysc:
+ if (mask & SDHCI_RESET_ALL)
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCONFIG, sysc);
}
#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\
@@ -863,7 +925,7 @@ static void sdhci_omap_set_timeout(struct sdhci_host *host,
__sdhci_set_timeout(host, cmd);
}
-static struct sdhci_ops sdhci_omap_ops = {
+static const struct sdhci_ops sdhci_omap_ops = {
.set_clock = sdhci_omap_set_clock,
.set_power = sdhci_omap_set_power,
.enable_dma = sdhci_omap_enable_dma,
@@ -877,34 +939,73 @@ static struct sdhci_ops sdhci_omap_ops = {
.set_timeout = sdhci_omap_set_timeout,
};
-static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
+static unsigned int sdhci_omap_regulator_get_caps(struct device *dev,
+ const char *name)
{
- u32 reg;
- int ret = 0;
+ struct regulator *reg;
+ unsigned int caps = 0;
+
+ reg = regulator_get(dev, name);
+ if (IS_ERR(reg))
+ return ~0U;
+
+ if (regulator_is_supported_voltage(reg, 1700000, 1950000))
+ caps |= SDHCI_CAN_VDD_180;
+ if (regulator_is_supported_voltage(reg, 2700000, 3150000))
+ caps |= SDHCI_CAN_VDD_300;
+ if (regulator_is_supported_voltage(reg, 3150000, 3600000))
+ caps |= SDHCI_CAN_VDD_330;
+
+ regulator_put(reg);
+
+ return caps;
+}
+
+static int sdhci_omap_set_capabilities(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
struct device *dev = omap_host->dev;
- struct regulator *vqmmc;
+ const u32 mask = SDHCI_CAN_VDD_180 | SDHCI_CAN_VDD_300 | SDHCI_CAN_VDD_330;
+ unsigned int pbias, vqmmc, caps = 0;
+ u32 reg;
- vqmmc = regulator_get(dev, "vqmmc");
- if (IS_ERR(vqmmc)) {
- ret = PTR_ERR(vqmmc);
- goto reg_put;
- }
+ pbias = sdhci_omap_regulator_get_caps(dev, "pbias");
+ vqmmc = sdhci_omap_regulator_get_caps(dev, "vqmmc");
+ caps = pbias & vqmmc;
+
+ if (pbias != ~0U && vqmmc == ~0U)
+ dev_warn(dev, "vqmmc regulator missing for pbias\n");
+ else if (caps == ~0U)
+ return 0;
+
+ /*
+ * Quirk handling to allow 3.0V vqmmc with a valid 3.3V PBIAS. This is
+ * needed for 3.0V ldo9_reg on omap5 at least.
+ */
+ if (pbias != ~0U && (pbias & SDHCI_CAN_VDD_330) &&
+ (vqmmc & SDHCI_CAN_VDD_300))
+ caps |= SDHCI_CAN_VDD_330;
/* voltage capabilities might be set by boot loader, clear it */
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
reg &= ~(CAPA_VS18 | CAPA_VS30 | CAPA_VS33);
- if (regulator_is_supported_voltage(vqmmc, IOV_3V3, IOV_3V3))
- reg |= CAPA_VS33;
- if (regulator_is_supported_voltage(vqmmc, IOV_1V8, IOV_1V8))
+ if (caps & SDHCI_CAN_VDD_180)
reg |= CAPA_VS18;
+ if (caps & SDHCI_CAN_VDD_300)
+ reg |= CAPA_VS30;
+
+ if (caps & SDHCI_CAN_VDD_330)
+ reg |= CAPA_VS33;
+
sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, reg);
-reg_put:
- regulator_put(vqmmc);
+ host->caps &= ~mask;
+ host->caps |= caps;
- return ret;
+ return 0;
}
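/*
 * A sketch of the resulting capabilities for an omap5-style setup, assuming
 * the pbias regulator reports the 1.8V and 3.0-3.3V ranges and vqmmc (e.g. a
 * 3.0V ldo9_reg) reports 1.8V and 3.0V:
 *
 *   pbias = SDHCI_CAN_VDD_180 | SDHCI_CAN_VDD_300 | SDHCI_CAN_VDD_330
 *   vqmmc = SDHCI_CAN_VDD_180 | SDHCI_CAN_VDD_300
 *   caps  = pbias & vqmmc     = SDHCI_CAN_VDD_180 | SDHCI_CAN_VDD_300
 *   quirk: pbias has VDD_330 and vqmmc has VDD_300 -> caps |= SDHCI_CAN_VDD_330
 *
 * CAPA then advertises VS18 | VS30 | VS33 and host->caps carries the same bits.
 */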
static const struct sdhci_pltfm_data sdhci_omap_pdata = {
@@ -920,26 +1021,56 @@ static const struct sdhci_pltfm_data sdhci_omap_pdata = {
.ops = &sdhci_omap_ops,
};
+static const struct sdhci_omap_data omap2430_data = {
+ .omap_offset = 0,
+ .offset = 0x100,
+};
+
+static const struct sdhci_omap_data omap3_data = {
+ .omap_offset = 0,
+ .offset = 0x100,
+};
+
+static const struct sdhci_omap_data omap4_data = {
+ .omap_offset = 0x100,
+ .offset = 0x200,
+ .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
+static const struct sdhci_omap_data omap5_data = {
+ .omap_offset = 0x100,
+ .offset = 0x200,
+ .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
static const struct sdhci_omap_data k2g_data = {
+ .omap_offset = 0x100,
.offset = 0x200,
};
static const struct sdhci_omap_data am335_data = {
+ .omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_SPECIAL_RESET,
};
static const struct sdhci_omap_data am437_data = {
+ .omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_SPECIAL_RESET,
};
static const struct sdhci_omap_data dra7_data = {
+ .omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_REQUIRE_IODELAY,
};
static const struct of_device_id omap_sdhci_match[] = {
+ { .compatible = "ti,omap2430-sdhci", .data = &omap2430_data },
+ { .compatible = "ti,omap3-sdhci", .data = &omap3_data },
+ { .compatible = "ti,omap4-sdhci", .data = &omap4_data },
+ { .compatible = "ti,omap5-sdhci", .data = &omap5_data },
{ .compatible = "ti,dra7-sdhci", .data = &dra7_data },
{ .compatible = "ti,k2g-sdhci", .data = &k2g_data },
{ .compatible = "ti,am335-sdhci", .data = &am335_data },
@@ -1087,16 +1218,11 @@ static int sdhci_omap_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_omap_host *omap_host;
struct mmc_host *mmc;
- const struct of_device_id *match;
- struct sdhci_omap_data *data;
+ const struct sdhci_omap_data *data;
const struct soc_device_attribute *soc;
struct resource *regs;
- match = of_match_device(omap_sdhci_match, dev);
- if (!match)
- return -EINVAL;
-
- data = (struct sdhci_omap_data *)match->data;
+ data = of_device_get_match_data(&pdev->dev);
if (!data) {
dev_err(dev, "no sdhci omap data\n");
return -EINVAL;
@@ -1122,6 +1248,8 @@ static int sdhci_omap_probe(struct platform_device *pdev)
omap_host->power_mode = MMC_POWER_UNDEFINED;
omap_host->timing = MMC_TIMING_LEGACY;
omap_host->flags = data->flags;
+ omap_host->omap_offset = data->omap_offset;
+ omap_host->con = -EINVAL; /* Prevent invalid restore on first resume */
host->ioaddr += offset;
host->mapbase = regs->start + offset;
@@ -1129,7 +1257,7 @@ static int sdhci_omap_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
ret = mmc_of_parse(mmc);
if (ret)
- goto err_pltfm_free;
+ return ret;
soc = soc_device_match(sdhci_omap_soc_devices);
if (soc) {
@@ -1142,26 +1270,23 @@ static int sdhci_omap_probe(struct platform_device *pdev)
mmc->f_max = 48000000;
}
- if (!mmc_can_gpio_ro(mmc))
+ if (!mmc_host_can_gpio_ro(mmc))
mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
pltfm_host->clk = devm_clk_get(dev, "fck");
- if (IS_ERR(pltfm_host->clk)) {
- ret = PTR_ERR(pltfm_host->clk);
- goto err_pltfm_free;
- }
+ if (IS_ERR(pltfm_host->clk))
+ return PTR_ERR(pltfm_host->clk);
ret = clk_set_rate(pltfm_host->clk, mmc->f_max);
- if (ret) {
- dev_err(dev, "failed to set clock to %d\n", mmc->f_max);
- goto err_pltfm_free;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to set clock to %d\n", mmc->f_max);
omap_host->pbias = devm_regulator_get_optional(dev, "pbias");
if (IS_ERR(omap_host->pbias)) {
ret = PTR_ERR(omap_host->pbias);
if (ret != -ENODEV)
- goto err_pltfm_free;
+ return ret;
dev_dbg(dev, "unable to get pbias regulator %d\n", ret);
}
omap_host->pbias_enabled = false;
@@ -1169,9 +1294,12 @@ static int sdhci_omap_probe(struct platform_device *pdev)
/*
* omap_device_pm_domain has callbacks to enable the main
* functional clock, interface clock and also configure the
- * SYSCONFIG register of omap devices. The callback will be invoked
- * as part of pm_runtime_get_sync.
+ * SYSCONFIG register to clear any boot loader set voltage
+ * capabilities before calling sdhci_setup_host(). The
+ * callback will be invoked as part of pm_runtime_get_sync.
*/
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 50);
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret) {
@@ -1179,10 +1307,10 @@ static int sdhci_omap_probe(struct platform_device *pdev)
goto err_rpm_disable;
}
- ret = sdhci_omap_set_capabilities(omap_host);
+ ret = sdhci_omap_set_capabilities(host);
if (ret) {
dev_err(dev, "failed to set system capabilities\n");
- goto err_put_sync;
+ goto err_rpm_put;
}
host->mmc_host_ops.start_signal_voltage_switch =
@@ -1192,16 +1320,28 @@ static int sdhci_omap_probe(struct platform_device *pdev)
host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning;
host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq;
- /* Switch to external DMA only if there is the "dmas" property */
- if (of_find_property(dev->of_node, "dmas", NULL))
+ /*
+ * Switch to external DMA only if there is the "dmas" property and
+ * ADMA is not available on the controller instance.
+ */
+ if (device_property_present(dev, "dmas") &&
+ !sdhci_omap_has_adma(omap_host, offset))
sdhci_switch_external_dma(host, true);
+ if (device_property_read_bool(dev, "ti,non-removable")) {
+ dev_warn_once(dev, "using old ti,non-removable property\n");
+ mmc->caps |= MMC_CAP_NONREMOVABLE;
+ }
+
/* R1B responses is required to properly manage HW busy detection. */
mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+ /* Enable SDIO card power off. */
+ mmc->caps |= MMC_CAP_POWER_OFF_CARD;
+
ret = sdhci_setup_host(host);
if (ret)
- goto err_put_sync;
+ goto err_rpm_put;
ret = sdhci_omap_config_iodelay_pinctrl_state(omap_host);
if (ret)
@@ -1211,85 +1351,117 @@ static int sdhci_omap_probe(struct platform_device *pdev)
if (ret)
goto err_cleanup_host;
+ /*
+ * SDIO devices can use the dat1 pin as a wake-up interrupt. Some
+	 * devices, like wl1xxx, use an out-of-band GPIO interrupt instead.
+ */
+ omap_host->wakeirq = of_irq_get_byname(dev->of_node, "wakeup");
+ if (omap_host->wakeirq == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_cleanup_host;
+ }
+ if (omap_host->wakeirq > 0) {
+ device_init_wakeup(dev, true);
+ ret = dev_pm_set_dedicated_wake_irq(dev, omap_host->wakeirq);
+ if (ret) {
+ device_init_wakeup(dev, false);
+ goto err_cleanup_host;
+ }
+ host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+ }
+
+ pm_runtime_put_autosuspend(dev);
+
return 0;
err_cleanup_host:
sdhci_cleanup_host(host);
-err_put_sync:
- pm_runtime_put_sync(dev);
-
+err_rpm_put:
+ pm_runtime_put_autosuspend(dev);
err_rpm_disable:
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
-
-err_pltfm_free:
- sdhci_pltfm_free(pdev);
return ret;
}
-static int sdhci_omap_remove(struct platform_device *pdev)
+static void sdhci_omap_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sdhci_host *host = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(dev);
sdhci_remove_host(host, true);
+ device_init_wakeup(dev, false);
+ dev_pm_clear_wake_irq(dev);
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
- sdhci_pltfm_free(pdev);
-
- return 0;
+ /* Ensure device gets disabled despite userspace sysfs config */
+ pm_runtime_force_suspend(dev);
}
-#ifdef CONFIG_PM_SLEEP
+
static void sdhci_omap_context_save(struct sdhci_omap_host *omap_host)
{
omap_host->con = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
omap_host->hctl = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
+ omap_host->sysctl = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
omap_host->capa = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ omap_host->ie = sdhci_omap_readl(omap_host, SDHCI_OMAP_IE);
+ omap_host->ise = sdhci_omap_readl(omap_host, SDHCI_OMAP_ISE);
}
+/* Order matters here: HCTL must be restored in two phases */
static void sdhci_omap_context_restore(struct sdhci_omap_host *omap_host)
{
- sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, omap_host->con);
sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, omap_host->hctl);
sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, omap_host->capa);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, omap_host->hctl);
+
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, omap_host->sysctl);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, omap_host->con);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_IE, omap_host->ie);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_ISE, omap_host->ise);
}
-static int __maybe_unused sdhci_omap_suspend(struct device *dev)
+static int sdhci_omap_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
- sdhci_suspend_host(host);
+ if (host->tuning_mode != SDHCI_TUNING_MODE_3)
+ mmc_retune_needed(host->mmc);
+
+ if (omap_host->con != -EINVAL)
+ sdhci_runtime_suspend_host(host);
sdhci_omap_context_save(omap_host);
pinctrl_pm_select_idle_state(dev);
- pm_runtime_force_suspend(dev);
-
return 0;
}
-static int __maybe_unused sdhci_omap_resume(struct device *dev)
+static int sdhci_omap_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
- pm_runtime_force_resume(dev);
-
pinctrl_pm_select_default_state(dev);
- sdhci_omap_context_restore(omap_host);
-
- sdhci_resume_host(host);
+ if (omap_host->con != -EINVAL) {
+ sdhci_omap_context_restore(omap_host);
+ sdhci_runtime_resume_host(host, 0);
+ }
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(sdhci_omap_dev_pm_ops, sdhci_omap_suspend,
- sdhci_omap_resume);
+
+static const struct dev_pm_ops sdhci_omap_dev_pm_ops = {
+ RUNTIME_PM_OPS(sdhci_omap_runtime_suspend, sdhci_omap_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
static struct platform_driver sdhci_omap_driver = {
.probe = sdhci_omap_probe,
@@ -1297,7 +1469,7 @@ static struct platform_driver sdhci_omap_driver = {
.driver = {
.name = "sdhci-omap",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sdhci_omap_dev_pm_ops,
+ .pm = pm_ptr(&sdhci_omap_dev_pm_ops),
.of_match_table = omap_sdhci_match,
},
};
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index be19785227fe..47a0a738862b 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -17,20 +17,21 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/device.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
-#include <linux/mmc/slot-gpio.h>
-#include <linux/mmc/sdhci-pci-data.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/slot-gpio.h>
+
#ifdef CONFIG_X86
#include <asm/iosf_mbi.h>
#endif
@@ -38,7 +39,9 @@
#include "cqhci.h"
#include "sdhci.h"
+#include "sdhci-cqhci.h"
#include "sdhci-pci.h"
+#include "sdhci-uhs2.h"
static void sdhci_pci_hw_reset(struct sdhci_host *host);
@@ -62,7 +65,7 @@ static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
if ((pm_flags & MMC_PM_KEEP_POWER) && (pm_flags & MMC_PM_WAKE_SDIO_IRQ))
return device_wakeup_enable(&chip->pdev->dev);
else if (!cap_cd_wake)
- return device_wakeup_disable(&chip->pdev->dev);
+ device_wakeup_disable(&chip->pdev->dev);
return 0;
}
@@ -149,18 +152,15 @@ static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot;
struct sdhci_host *host;
- int i, ret;
- for (i = 0; i < chip->num_slots; i++) {
+ for (int i = 0; i < chip->num_slots; i++) {
slot = chip->slots[i];
if (!slot)
continue;
host = slot->host;
- ret = sdhci_runtime_suspend_host(host);
- if (ret)
- goto err_pci_runtime_suspend;
+ sdhci_runtime_suspend_host(host);
if (chip->rpm_retune &&
host->tuning_mode != SDHCI_TUNING_MODE_3)
@@ -168,26 +168,18 @@ static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
}
return 0;
-
-err_pci_runtime_suspend:
- while (--i >= 0)
- sdhci_runtime_resume_host(chip->slots[i]->host, 0);
- return ret;
}
static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot;
- int i, ret;
- for (i = 0; i < chip->num_slots; i++) {
+ for (int i = 0; i < chip->num_slots; i++) {
slot = chip->slots[i];
if (!slot)
continue;
- ret = sdhci_runtime_resume_host(slot->host, 0);
- if (ret)
- return ret;
+ sdhci_runtime_resume_host(slot->host, 0);
}
return 0;
@@ -234,14 +226,6 @@ static void sdhci_pci_dumpregs(struct mmc_host *mmc)
sdhci_dumpregs(mmc_priv(mmc));
}
-static void sdhci_cqhci_reset(struct sdhci_host *host, u8 mask)
-{
- if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) &&
- host->mmc->cqe_private)
- cqhci_deactivate(host->mmc);
- sdhci_reset(host, mask);
-}
-
/*****************************************************************************\
* *
* Hardware specific quirk handling *
@@ -258,13 +242,16 @@ static int ricoh_probe(struct sdhci_pci_chip *chip)
static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot)
{
- slot->host->caps =
+ u32 caps =
FIELD_PREP(SDHCI_TIMEOUT_CLK_MASK, 0x21) |
FIELD_PREP(SDHCI_CLOCK_BASE_MASK, 0x21) |
SDHCI_TIMEOUT_CLK_UNIT |
SDHCI_CAN_VDD_330 |
SDHCI_CAN_DO_HISPD |
SDHCI_CAN_DO_SDMA;
+ u32 caps1 = 0;
+
+ __sdhci_read_caps(slot->host, NULL, &caps, &caps1);
return 0;
}
@@ -293,10 +280,30 @@ static const struct sdhci_pci_fixes sdhci_ricoh_mmc = {
#endif
.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_CLOCK_BEFORE_RESET |
- SDHCI_QUIRK_NO_CARD_NO_RESET |
- SDHCI_QUIRK_MISSING_CAPS
+ SDHCI_QUIRK_NO_CARD_NO_RESET,
};
+static void ene_714_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_set_ios(mmc, ios);
+
+ /*
+ * Some (ENE) controllers misbehave on some ios operations,
+ * signalling timeout and CRC errors even on CMD0. Resetting
+ * it on each ios seems to solve the problem.
+ */
+ if (!(host->flags & SDHCI_DEVICE_DEAD))
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+}
+
+static int ene_714_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc_host_ops.set_ios = ene_714_set_ios;
+ return 0;
+}
+
static const struct sdhci_pci_fixes sdhci_ene_712 = {
.quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_BROKEN_DMA,
@@ -304,8 +311,8 @@ static const struct sdhci_pci_fixes sdhci_ene_712 = {
static const struct sdhci_pci_fixes sdhci_ene_714 = {
.quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
- SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
SDHCI_QUIRK_BROKEN_DMA,
+ .probe_slot = ene_714_probe_slot,
};
static const struct sdhci_pci_fixes sdhci_cafe = {
@@ -345,73 +352,6 @@ static int pch_hc_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
-#ifdef CONFIG_PM
-
-static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id)
-{
- struct sdhci_pci_slot *slot = dev_id;
- struct sdhci_host *host = slot->host;
-
- mmc_detect_change(host->mmc, msecs_to_jiffies(200));
- return IRQ_HANDLED;
-}
-
-static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
-{
- int err, irq, gpio = slot->cd_gpio;
-
- slot->cd_gpio = -EINVAL;
- slot->cd_irq = -EINVAL;
-
- if (!gpio_is_valid(gpio))
- return;
-
- err = devm_gpio_request(&slot->chip->pdev->dev, gpio, "sd_cd");
- if (err < 0)
- goto out;
-
- err = gpio_direction_input(gpio);
- if (err < 0)
- goto out_free;
-
- irq = gpio_to_irq(gpio);
- if (irq < 0)
- goto out_free;
-
- err = request_irq(irq, sdhci_pci_sd_cd, IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING, "sd_cd", slot);
- if (err)
- goto out_free;
-
- slot->cd_gpio = gpio;
- slot->cd_irq = irq;
-
- return;
-
-out_free:
- devm_gpio_free(&slot->chip->pdev->dev, gpio);
-out:
- dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n");
-}
-
-static void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
-{
- if (slot->cd_irq >= 0)
- free_irq(slot->cd_irq, slot);
-}
-
-#else
-
-static inline void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
-{
-}
-
-static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
-{
-}
-
-#endif
-
static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
@@ -534,11 +474,12 @@ static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
int err = 0;
size_t len;
- obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
+ obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL,
+ ACPI_TYPE_BUFFER);
if (!obj)
return -EOPNOTSUPP;
- if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
+ if (obj->buffer.length < 1) {
err = -EINVAL;
goto out;
}
@@ -619,23 +560,16 @@ static int intel_select_drive_strength(struct mmc_card *card,
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
- struct sdhci_host *host = mmc_priv(mmc);
- unsigned long flags;
- int ret = 0;
if (!gpio_cd)
return 0;
- spin_lock_irqsave(&host->lock, flags);
-
- if (host->flags & SDHCI_DEVICE_DEAD)
- goto out;
-
- ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
-out:
- spin_unlock_irqrestore(&host->lock, flags);
+ return sdhci_get_cd_nogpio(mmc);
+}
- return ret;
+static int mrfld_get_cd(struct mmc_host *mmc)
+{
+ return sdhci_get_cd_nogpio(mmc);
}
#define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
@@ -665,8 +599,12 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_set_power(host, mode, vdd);
- if (mode == MMC_POWER_OFF)
+ if (mode == MMC_POWER_OFF) {
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BYT_SD)
+ usleep_range(15000, 17500);
return;
+ }
/*
* Bus power might not enable after D3 -> D0 transition due to the
@@ -741,8 +679,19 @@ static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
return 0;
}
+static void sdhci_intel_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+
+ /* Stop card clock separately to avoid glitches on clock line */
+ if (clk & SDHCI_CLOCK_CARD_EN)
+ sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN, SDHCI_CLOCK_CONTROL);
+
+ sdhci_set_clock(host, clock);
+}
+
static const struct sdhci_ops sdhci_intel_byt_ops = {
- .set_clock = sdhci_set_clock,
+ .set_clock = sdhci_intel_set_clock,
.set_power = sdhci_intel_set_power,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
@@ -752,11 +701,11 @@ static const struct sdhci_ops sdhci_intel_byt_ops = {
};
static const struct sdhci_ops sdhci_intel_glk_ops = {
- .set_clock = sdhci_set_clock,
+ .set_clock = sdhci_intel_set_clock,
.set_power = sdhci_intel_set_power,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_cqhci_reset,
+ .reset = sdhci_and_cqhci_reset,
.set_uhs_signaling = sdhci_intel_set_uhs_signaling,
.hw_reset = sdhci_pci_hw_reset,
.irq = sdhci_cqhci_irq,
@@ -964,7 +913,14 @@ static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
{
return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
(dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
- dmi_match(DMI_SYS_VENDOR, "IRBIS"));
+ dmi_match(DMI_SYS_VENDOR, "IRBIS") ||
+ dmi_match(DMI_SYS_VENDOR, "Positivo Tecnologia SA"));
+}
+
+static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
+{
+ return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC &&
+ dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC.");
}
static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
@@ -975,9 +931,11 @@ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
slot->host->mmc->caps2 |= MMC_CAP2_CQE;
if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
- slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
- slot->host->mmc_host_ops.hs400_enhanced_strobe =
- intel_hs400_enhanced_strobe;
+ if (!jsl_broken_hs400es(slot)) {
+ slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
+ slot->host->mmc_host_ops.hs400_enhanced_strobe =
+ intel_hs400_enhanced_strobe;
+ }
slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
}
@@ -1284,6 +1242,29 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
.priv_size = sizeof(struct intel_host),
};
+/* DMI quirks for devices with missing or broken CD GPIO info */
+static const struct gpiod_lookup_table vexia_edu_atla10_cd_gpios = {
+ .dev_id = "0000:00:12.0",
+ .table = {
+ GPIO_LOOKUP("INT33FC:00", 38, "cd", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static const struct dmi_system_id sdhci_intel_byt_cd_gpio_override[] = {
+ {
+ /* Vexia Edu Atla 10 tablet 9V version */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
+ },
+ .driver_data = (void *)&vexia_edu_atla10_cd_gpios,
+ },
+ { }
+};
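The override plumbing above is generic: dmi_first_match() on the fixes' cd_gpio_override table returns the matching entry, whose driver_data is a gpiod_lookup_table keyed via .dev_id to the PCI slot name, registered just long enough for mmc_gpiod_request_cd() to resolve the "cd" line. A minimal sketch of how a second affected machine could be added, assuming <linux/dmi.h> and <linux/gpio/machine.h>; the board name, DMI strings, GPIO chip label and pin number below are purely illustrative:

    /* Hypothetical lookup table; identifiers are illustrative only */
    static const struct gpiod_lookup_table example_tablet_cd_gpios = {
    	.dev_id = "0000:00:12.0",
    	.table = {
    		GPIO_LOOKUP("INT33FC:02", 17, "cd", GPIO_ACTIVE_LOW),
    		{ }
    	},
    };

    /* extra entry for sdhci_intel_byt_cd_gpio_override[] */
    {
    	/* Hypothetical "Example Tablet" */
    	.matches = {
    		DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
    		DMI_MATCH(DMI_PRODUCT_NAME, "Example Tablet"),
    	},
    	.driver_data = (void *)&example_tablet_cd_gpios,
    },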
+
static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
#ifdef CONFIG_PM_SLEEP
.resume = byt_resume,
@@ -1302,6 +1283,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
.add_host = byt_add_host,
.remove_slot = byt_remove_slot,
.ops = &sdhci_intel_byt_ops,
+ .cd_gpio_override = sdhci_intel_byt_cd_gpio_override,
.priv_size = sizeof(struct intel_host),
};
@@ -1314,16 +1296,11 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
#ifdef CONFIG_ACPI
static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot)
{
- struct acpi_device *device, *child;
+ struct acpi_device *device;
device = ACPI_COMPANION(&slot->chip->pdev->dev);
- if (!device)
- return;
-
- acpi_device_fix_up_power(device);
- list_for_each_entry(child, &device->children, node)
- if (child->status.present && child->status.enabled)
- acpi_device_fix_up_power(child);
+ if (device)
+ acpi_device_fix_up_power_extended(device);
}
#else
static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {}
@@ -1341,6 +1318,14 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
MMC_CAP_1_8V_DDR;
break;
case INTEL_MRFLD_SD:
+ slot->cd_idx = 0;
+ slot->cd_override_level = true;
+ /*
+ * There are two PCB designs of SD card slot with the opposite
+ * card detection sense. Quirk this out by ignoring GPIO state
+ * completely in the custom ->get_cd() callback.
+ */
+ slot->host->mmc_host_ops.get_cd = mrfld_get_cd;
slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
break;
case INTEL_MRFLD_SDIO:
@@ -1365,6 +1350,23 @@ static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
.probe_slot = intel_mrfld_mmc_probe_slot,
};
+#define JMB388_SAMPLE_COUNT 5
+
+static int jmicron_jmb388_get_ro(struct mmc_host *mmc)
+{
+ int i, ro_count;
+
+ ro_count = 0;
+ for (i = 0; i < JMB388_SAMPLE_COUNT; i++) {
+ if (sdhci_get_ro(mmc) > 0) {
+ if (++ro_count > JMB388_SAMPLE_COUNT / 2)
+ return 1;
+ }
+ msleep(30);
+ }
+ return 0;
+}
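Worked through: JMB388_SAMPLE_COUNT / 2 is 2, so the callback reports the card as read-only as soon as three of the (up to) five samples, taken 30 ms apart, read back as protected (at the earliest after roughly 60 ms), and otherwise returns writeable after all five samples, i.e. after roughly 150 ms. This per-slot ->get_ro() override replaces the chip-wide SDHCI_QUIRK_UNSTABLE_RO_DETECT that jmicron_probe() used to set, as the hunks further down show.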
+
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{
u8 scratch;
@@ -1372,7 +1374,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
if (ret)
- return ret;
+ goto fail;
/*
* Turn PMOS on [bit 0], set over current detection to 2.4 V
@@ -1383,7 +1385,10 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
else
scratch &= ~0x47;
- return pci_write_config_byte(chip->pdev, 0xAE, scratch);
+ ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
+
+fail:
+ return pcibios_err_to_errno(ret);
}
static int jmicron_probe(struct sdhci_pci_chip *chip)
@@ -1446,11 +1451,6 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
return ret;
}
- /* quirk for unsable RO-detection on JM388 chips */
- if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
- chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
- chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;
-
return 0;
}
@@ -1505,6 +1505,11 @@ static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
+ /* Handle unstable RO-detection on JM388 chips */
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
+ slot->host->mmc_host_ops.get_ro = jmicron_jmb388_get_ro;
+
return 0;
}
@@ -1791,6 +1796,8 @@ static int amd_probe(struct sdhci_pci_chip *chip)
}
}
+ pci_dev_put(smbus_dev);
+
if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
@@ -1932,6 +1939,7 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, LKF_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, LKF_SD, intel_byt_sd),
+ SDHCI_PCI_DEVICE(INTEL, ADL_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(O2, 8120, o2),
SDHCI_PCI_DEVICE(O2, 8220, o2),
SDHCI_PCI_DEVICE(O2, 8221, o2),
@@ -1942,11 +1950,16 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(O2, SDS1, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
+ SDHCI_PCI_DEVICE(O2, GG8_9860, o2),
+ SDHCI_PCI_DEVICE(O2, GG8_9861, o2),
+ SDHCI_PCI_DEVICE(O2, GG8_9862, o2),
+ SDHCI_PCI_DEVICE(O2, GG8_9863, o2),
SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps),
SDHCI_PCI_DEVICE(GLI, 9750, gl9750),
SDHCI_PCI_DEVICE(GLI, 9755, gl9755),
SDHCI_PCI_DEVICE(GLI, 9763E, gl9763e),
+ SDHCI_PCI_DEVICE(GLI, 9767, gl9767),
SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
/* Generic SD host controller */
{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
@@ -1981,21 +1994,6 @@ int sdhci_pci_enable_dma(struct sdhci_host *host)
return 0;
}
-static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host)
-{
- struct sdhci_pci_slot *slot = sdhci_priv(host);
- int rst_n_gpio = slot->rst_n_gpio;
-
- if (!gpio_is_valid(rst_n_gpio))
- return;
- gpio_set_value_cansleep(rst_n_gpio, 0);
- /* For eMMC, minimum is 1us but give it 10us for good measure */
- udelay(10);
- gpio_set_value_cansleep(rst_n_gpio, 1);
- /* For eMMC, minimum is 200us but give it 300us for good measure */
- usleep_range(300, 1000);
-}
-
static void sdhci_pci_hw_reset(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
@@ -2087,6 +2085,42 @@ static const struct dev_pm_ops sdhci_pci_pm_ops = {
* *
\*****************************************************************************/
+static struct gpiod_lookup_table *sdhci_pci_add_gpio_lookup_table(
+ struct sdhci_pci_chip *chip)
+{
+ struct gpiod_lookup_table *cd_gpio_lookup_table;
+ const struct dmi_system_id *dmi_id = NULL;
+ size_t count;
+
+ if (chip->fixes && chip->fixes->cd_gpio_override)
+ dmi_id = dmi_first_match(chip->fixes->cd_gpio_override);
+
+ if (!dmi_id)
+ return NULL;
+
+ cd_gpio_lookup_table = dmi_id->driver_data;
+ for (count = 0; cd_gpio_lookup_table->table[count].key; count++)
+ ;
+
+ cd_gpio_lookup_table = kmemdup(dmi_id->driver_data,
+ /* count + 1 terminating entry */
+ struct_size(cd_gpio_lookup_table, table, count + 1),
+ GFP_KERNEL);
+ if (!cd_gpio_lookup_table)
+ return ERR_PTR(-ENOMEM);
+
+ gpiod_add_lookup_table(cd_gpio_lookup_table);
+ return cd_gpio_lookup_table;
+}
+
+static void sdhci_pci_remove_gpio_lookup_table(struct gpiod_lookup_table *lookup_table)
+{
+ if (lookup_table) {
+ gpiod_remove_lookup_table(lookup_table);
+ kfree(lookup_table);
+ }
+}
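A note on the sizing: gpiod_lookup_table ends in a flexible table[] array, so struct_size(cd_gpio_lookup_table, table, count + 1) works out to sizeof(struct gpiod_lookup_table) + (count + 1) * sizeof(struct gpiod_lookup), i.e. the kmemdup() copy keeps the empty terminator entry the counting loop stopped on. The copy is needed in the first place because the DMI driver_data template is const while gpiod_add_lookup_table() links the table it is given into a global list; the duplicate is registered only for the duration of the card-detect GPIO lookup in sdhci_pci_probe_slot() and freed again right after.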
+
static struct sdhci_pci_slot *sdhci_pci_probe_slot(
struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
int slotno)
@@ -2126,26 +2160,8 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
slot->chip = chip;
slot->host = host;
- slot->rst_n_gpio = -EINVAL;
- slot->cd_gpio = -EINVAL;
slot->cd_idx = -1;
- /* Retrieve platform data if there is any */
- if (*sdhci_pci_get_data)
- slot->data = sdhci_pci_get_data(pdev, slotno);
-
- if (slot->data) {
- if (slot->data->setup) {
- ret = slot->data->setup(slot->data);
- if (ret) {
- dev_err(&pdev->dev, "platform setup failed\n");
- goto free;
- }
- }
- slot->rst_n_gpio = slot->data->rst_n_gpio;
- slot->cd_gpio = slot->data->cd_gpio;
- }
-
host->hw_name = "PCI";
host->ops = chip->fixes && chip->fixes->ops ?
chip->fixes->ops :
@@ -2158,7 +2174,7 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
ret = pcim_iomap_regions(pdev, BIT(bar), mmc_hostname(host->mmc));
if (ret) {
dev_err(&pdev->dev, "cannot request region\n");
- goto cleanup;
+ return ERR_PTR(ret);
}
host->ioaddr = pcim_iomap_table(pdev)[bar];
@@ -2166,18 +2182,7 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
if (chip->fixes && chip->fixes->probe_slot) {
ret = chip->fixes->probe_slot(slot);
if (ret)
- goto cleanup;
- }
-
- if (gpio_is_valid(slot->rst_n_gpio)) {
- if (!devm_gpio_request(&pdev->dev, slot->rst_n_gpio, "eMMC_reset")) {
- gpio_direction_output(slot->rst_n_gpio, 1);
- slot->host->mmc->caps |= MMC_CAP_HW_RESET;
- slot->hw_reset = sdhci_pci_gpio_hw_reset;
- } else {
- dev_warn(&pdev->dev, "failed to request rst_n_gpio\n");
- slot->rst_n_gpio = -EINVAL;
- }
+ return ERR_PTR(ret);
}
host->mmc->pm_caps = MMC_PM_KEEP_POWER;
@@ -2191,8 +2196,19 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
device_init_wakeup(&pdev->dev, true);
if (slot->cd_idx >= 0) {
+ struct gpiod_lookup_table *cd_gpio_lookup_table;
+
+ cd_gpio_lookup_table = sdhci_pci_add_gpio_lookup_table(chip);
+ if (IS_ERR(cd_gpio_lookup_table)) {
+ ret = PTR_ERR(cd_gpio_lookup_table);
+ goto remove;
+ }
+
ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
slot->cd_override_level, 0);
+
+ sdhci_pci_remove_gpio_lookup_table(cd_gpio_lookup_table);
+
if (ret && ret != -EPROBE_DEFER)
ret = mmc_gpiod_request_cd(host->mmc, NULL,
slot->cd_idx,
@@ -2214,15 +2230,11 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
if (ret)
goto remove;
- sdhci_pci_add_own_cd(slot);
-
/*
* Check if the chip needs a separate GPIO for card detect to wake up
* from runtime suspend. If it is not there, don't allow runtime PM.
- * Note sdhci_pci_add_own_cd() sets slot->cd_gpio to -EINVAL on failure.
*/
- if (chip->fixes && chip->fixes->own_cd_for_runtime_pm &&
- !gpio_is_valid(slot->cd_gpio) && slot->cd_idx < 0)
+ if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && slot->cd_idx < 0)
chip->allow_runtime_pm = false;
return slot;
@@ -2231,13 +2243,6 @@ remove:
if (chip->fixes && chip->fixes->remove_slot)
chip->fixes->remove_slot(slot, 0);
-cleanup:
- if (slot->data && slot->data->cleanup)
- slot->data->cleanup(slot->data);
-
-free:
- sdhci_free_host(host);
-
return ERR_PTR(ret);
}
@@ -2246,22 +2251,28 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
int dead;
u32 scratch;
- sdhci_pci_remove_own_cd(slot);
-
dead = 0;
scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
if (scratch == (u32)-1)
dead = 1;
- sdhci_remove_host(slot->host, dead);
+ if (slot->chip->fixes && slot->chip->fixes->remove_host)
+ slot->chip->fixes->remove_host(slot, dead);
+ else
+ sdhci_remove_host(slot->host, dead);
if (slot->chip->fixes && slot->chip->fixes->remove_slot)
slot->chip->fixes->remove_slot(slot, dead);
+}
- if (slot->data && slot->data->cleanup)
- slot->data->cleanup(slot->data);
+int sdhci_pci_uhs2_add_host(struct sdhci_pci_slot *slot)
+{
+ return sdhci_uhs2_add_host(slot->host);
+}
- sdhci_free_host(slot->host);
+void sdhci_pci_uhs2_remove_host(struct sdhci_pci_slot *slot, int dead)
+{
+ sdhci_uhs2_remove_host(slot->host, dead);
}
static void sdhci_pci_runtime_pm_allow(struct device *dev)
@@ -2297,7 +2308,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
if (ret)
- return ret;
+ return pcibios_err_to_errno(ret);
slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
@@ -2306,7 +2317,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
if (ret)
- return ret;
+ return pcibios_err_to_errno(ret);
first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
@@ -2379,7 +2390,8 @@ static struct pci_driver sdhci_driver = {
.probe = sdhci_pci_probe,
.remove = sdhci_pci_remove,
.driver = {
- .pm = &sdhci_pci_pm_ops
+ .pm = &sdhci_pci_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
diff --git a/drivers/mmc/host/sdhci-pci-data.c b/drivers/mmc/host/sdhci-pci-data.c
deleted file mode 100644
index 18638fb363d8..000000000000
--- a/drivers/mmc/host/sdhci-pci-data.c
+++ /dev/null
@@ -1,6 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
-#include <linux/mmc/sdhci-pci-data.h>
-
-struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno);
-EXPORT_SYMBOL_GPL(sdhci_pci_get_data);
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 4fd99c1e82ba..b0f91cc9e40e 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -12,9 +12,13 @@
#include <linux/pci.h>
#include <linux/mmc/mmc.h>
#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/iopoll.h>
#include "sdhci.h"
+#include "sdhci-cqhci.h"
#include "sdhci-pci.h"
#include "cqhci.h"
+#include "sdhci-uhs2.h"
/* Genesys Logic extra registers */
#define SDHCI_GLI_9750_WT 0x800
@@ -62,6 +66,7 @@
#define GLI_9750_MISC_RX_INV_OFF 0x0
#define GLI_9750_MISC_RX_INV_VALUE GLI_9750_MISC_RX_INV_OFF
#define GLI_9750_MISC_TX1_DLY_VALUE 0x5
+#define SDHCI_GLI_9750_MISC_SSC_OFF BIT(26)
#define SDHCI_GLI_9750_TUNING_CONTROL 0x540
#define SDHCI_GLI_9750_TUNING_CONTROL_EN BIT(4)
@@ -92,6 +97,9 @@
#define PCIE_GLI_9763E_SCR 0x8E0
#define GLI_9763E_SCR_AXI_REQ BIT(9)
+#define PCIE_GLI_9763E_CFG 0x8A0
+#define GLI_9763E_CFG_LPSN_DIS BIT(12)
+
#define PCIE_GLI_9763E_CFG2 0x8A4
#define GLI_9763E_CFG2_L1DLY GENMASK(28, 19)
#define GLI_9763E_CFG2_L1DLY_MID 0x54
@@ -116,6 +124,8 @@
#define PCI_GLI_9755_PECONF 0x44
#define PCI_GLI_9755_LFCLK GENMASK(14, 12)
#define PCI_GLI_9755_DMACLK BIT(29)
+#define PCI_GLI_9755_INVERT_CD BIT(30)
+#define PCI_GLI_9755_INVERT_WP BIT(31)
#define PCI_GLI_9755_CFG2 0x48
#define PCI_GLI_9755_CFG2_L1DLY GENMASK(28, 24)
@@ -130,13 +140,169 @@
#define PCI_GLI_9755_PLLSSC 0x68
#define PCI_GLI_9755_PLLSSC_PPM GENMASK(15, 0)
+#define PCI_GLI_9755_PLLSSC_RTL BIT(24)
+#define GLI_9755_PLLSSC_RTL_VALUE 0x1
+#define PCI_GLI_9755_PLLSSC_TRANS_PASS BIT(27)
+#define GLI_9755_PLLSSC_TRANS_PASS_VALUE 0x1
+#define PCI_GLI_9755_PLLSSC_RECV GENMASK(29, 28)
+#define GLI_9755_PLLSSC_RECV_VALUE 0x0
+#define PCI_GLI_9755_PLLSSC_TRAN GENMASK(31, 30)
+#define GLI_9755_PLLSSC_TRAN_VALUE 0x3
+
+#define PCI_GLI_9755_UHS2_PLL 0x6C
+#define PCI_GLI_9755_UHS2_PLL_SSC GENMASK(9, 8)
+#define GLI_9755_UHS2_PLL_SSC_VALUE 0x0
+#define PCI_GLI_9755_UHS2_PLL_DELAY BIT(18)
+#define GLI_9755_UHS2_PLL_DELAY_VALUE 0x1
+#define PCI_GLI_9755_UHS2_PLL_PDRST BIT(27)
+#define GLI_9755_UHS2_PLL_PDRST_VALUE 0x1
#define PCI_GLI_9755_SerDes 0x70
+#define PCI_GLI_9755_UHS2_SERDES_INTR GENMASK(2, 0)
+#define GLI_9755_UHS2_SERDES_INTR_VALUE 0x3
+#define PCI_GLI_9755_UHS2_SERDES_ZC1 BIT(3)
+#define GLI_9755_UHS2_SERDES_ZC1_VALUE 0x0
+#define PCI_GLI_9755_UHS2_SERDES_ZC2 GENMASK(7, 4)
+#define GLI_9755_UHS2_SERDES_ZC2_DEFAULT 0xB
+#define GLI_9755_UHS2_SERDES_ZC2_SANDISK 0x0
#define PCI_GLI_9755_SCP_DIS BIT(19)
+#define PCI_GLI_9755_UHS2_SERDES_TRAN GENMASK(27, 24)
+#define GLI_9755_UHS2_SERDES_TRAN_VALUE 0xC
+#define PCI_GLI_9755_UHS2_SERDES_RECV GENMASK(31, 28)
+#define GLI_9755_UHS2_SERDES_RECV_VALUE 0xF
+
+#define PCI_GLI_9755_MISC 0x78
+#define PCI_GLI_9755_MISC_SSC_OFF BIT(26)
+
+#define SDHCI_GLI_9767_SD_HOST_OPERATION_CTL 0x508
+#define SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_CMD_CONFLICT_CHECK BIT(0)
+#define SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_DEBOUNCE GENMASK(21, 16)
+#define SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_DEBOUNCE_PLUG_IN_VALUE 0x05
+#define SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_DEBOUNCE_PLUG_OUT_VALUE 0x3F
+#define SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_DEBOUNCE_SCALE GENMASK(23, 22)
+#define SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_DEBOUNCE_SCALE_1MS 0x2
+#define SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_DEBOUNCE_SCALE_10MS 0x3
+
+#define SDHCI_GLI_9767_GM_BURST_SIZE 0x510
+#define SDHCI_GLI_9767_GM_BURST_SIZE_AXI_ALWAYS_SET BIT(8)
+
+#define PCIE_GLI_9767_VHS 0x884
+#define GLI_9767_VHS_REV GENMASK(19, 16)
+#define GLI_9767_VHS_REV_R 0x0
+#define GLI_9767_VHS_REV_M 0x1
+#define GLI_9767_VHS_REV_W 0x2
+
+#define PCIE_GLI_9767_COM_MAILBOX 0x888
+#define PCIE_GLI_9767_COM_MAILBOX_SSC_EN BIT(1)
+
+#define PCIE_GLI_9767_CFG 0x8A0
+#define PCIE_GLI_9767_CFG_LOW_PWR_OFF BIT(12)
+
+#define PCIE_GLI_9767_COMBO_MUX_CTL 0x8C8
+#define PCIE_GLI_9767_COMBO_MUX_CTL_RST_EN BIT(6)
+#define PCIE_GLI_9767_COMBO_MUX_CTL_WAIT_PERST_EN BIT(10)
+
+#define PCIE_GLI_9767_PWR_MACRO_CTL 0x8D0
+#define PCIE_GLI_9767_PWR_MACRO_CTL_LOW_VOLTAGE GENMASK(3, 0)
+#define PCIE_GLI_9767_PWR_MACRO_CTL_LD0_LOW_OUTPUT_VOLTAGE GENMASK(15, 12)
+#define PCIE_GLI_9767_PWR_MACRO_CTL_LD0_LOW_OUTPUT_VOLTAGE_VALUE 0x7
+#define PCIE_GLI_9767_PWR_MACRO_CTL_RCLK_AMPLITUDE_CTL GENMASK(29, 28)
+#define PCIE_GLI_9767_PWR_MACRO_CTL_RCLK_AMPLITUDE_CTL_VALUE 0x3
+
+#define PCIE_GLI_9767_SCR 0x8E0
+#define PCIE_GLI_9767_SCR_AUTO_AXI_W_BURST BIT(6)
+#define PCIE_GLI_9767_SCR_AUTO_AXI_R_BURST BIT(7)
+#define PCIE_GLI_9767_SCR_AXI_REQ BIT(9)
+#define PCIE_GLI_9767_SCR_CARD_DET_PWR_SAVING_EN BIT(10)
+#define PCIE_GLI_9767_SCR_SYSTEM_CLK_SELECT_MODE0 BIT(16)
+#define PCIE_GLI_9767_SCR_SYSTEM_CLK_SELECT_MODE1 BIT(17)
+#define PCIE_GLI_9767_SCR_CORE_PWR_D3_OFF BIT(21)
+#define PCIE_GLI_9767_SCR_CFG_RST_DATA_LINK_DOWN BIT(30)
+
+#define PCIE_GLI_9767_RESET_REG 0x8E4
+#define PCIE_GLI_9767_RESET_REG_SD_HOST_SW_RESET BIT(0)
+
+#define PCIE_GLI_9767_UHS2_PHY_SET_REG1 0x90C
+#define PCIE_GLI_9767_UHS2_PHY_SET_REG1_SERDES_INTR GENMASK(31, 29)
+#define PCIE_GLI_9767_UHS2_PHY_SET_REG1_SERDES_INTR_VALUE 0x3
+
+#define PCIE_GLI_9767_SDHC_CAP 0x91C
+#define PCIE_GLI_9767_SDHC_CAP_SDEI_RESULT BIT(5)
+
+#define PCIE_GLI_9767_SD_PLL_CTL 0x938
+#define PCIE_GLI_9767_SD_PLL_CTL_PLL_LDIV GENMASK(9, 0)
+#define PCIE_GLI_9767_SD_PLL_CTL_PLL_PDIV GENMASK(15, 12)
+#define PCIE_GLI_9767_SD_PLL_CTL_PLL_DIR_EN BIT(16)
+#define PCIE_GLI_9767_SD_PLL_CTL_SSC_EN BIT(19)
+#define PCIE_GLI_9767_SD_PLL_CTL_SSC_STEP_SETTING GENMASK(28, 24)
+
+#define PCIE_GLI_9767_SD_PLL_CTL2 0x93C
+#define PCIE_GLI_9767_SD_PLL_CTL2_PLLSSC_PPM GENMASK(31, 16)
+
+#define PCIE_GLI_9767_SD_EXPRESS_CTL 0x940
+#define PCIE_GLI_9767_SD_EXPRESS_CTL_SDEI_EXE BIT(0)
+#define PCIE_GLI_9767_SD_EXPRESS_CTL_SD_EXPRESS_MODE BIT(1)
+
+#define PCIE_GLI_9767_SD_DATA_MULTI_CTL 0x944
+#define PCIE_GLI_9767_SD_DATA_MULTI_CTL_SELECT_UHS2 BIT(5)
+#define PCIE_GLI_9767_SD_DATA_MULTI_CTL_UHS2_SWITCH_CTL BIT(8)
+#define PCIE_GLI_9767_SD_DATA_MULTI_CTL_DISCONNECT_TIME GENMASK(23, 16)
+#define PCIE_GLI_9767_SD_DATA_MULTI_CTL_DISCONNECT_TIME_VALUE 0x64
+
+#define PCIE_GLI_9767_UHS2_PHY_SET_REG2 0x948
+#define PCIE_GLI_9767_UHS2_PHY_SET_REG2_SSC_PPM_SETTING GENMASK(22, 21)
+#define PCIE_GLI_9767_UHS2_PHY_SET_REG2_SSC_PPM_SETTING_VALUE 0x0
+
+#define PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2 0x950
+#define PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2_SDEI_COMPLETE BIT(0)
+
+#define PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_EN_REG2 0x954
+#define PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_EN_REG2_SDEI_COMPLETE_STATUS_EN BIT(0)
+
+#define PCIE_GLI_9767_NORMAL_ERR_INT_SIGNAL_EN_REG2 0x958
+#define PCIE_GLI_9767_NORMAL_ERR_INT_SIGNAL_EN_REG2_SDEI_COMPLETE_SIGNAL_EN BIT(0)
+
+#define PCIE_GLI_9767_UHS2_CTL1 0x95C
+#define PCIE_GLI_9767_UHS2_CTL1_TRANS_PASS BIT(5)
+#define PCIE_GLI_9767_UHS2_CTL1_TRANS_PASS_VALUE 0x1
+#define PCIE_GLI_9767_UHS2_CTL1_DECODING_CTL BIT(6)
+#define PCIE_GLI_9767_UHS2_CTL1_DECODING_CTL_VALUE 0x1
+#define PCIE_GLI_9767_UHS2_CTL1_SERDES_TRAN GENMASK(10, 7)
+#define PCIE_GLI_9767_UHS2_CTL1_SERDES_TRAN_VALUE 0x3
+#define PCIE_GLI_9767_UHS2_CTL1_SERDES_RECV GENMASK(14, 11)
+#define PCIE_GLI_9767_UHS2_CTL1_SERDES_RECV_VALUE 0xf
+#define PCIE_GLI_9767_UHS2_CTL1_DIR_TRANS GENMASK(16, 15)
+#define PCIE_GLI_9767_UHS2_CTL1_DIR_TRANS_VALUE 0x0
+#define PCIE_GLI_9767_UHS2_CTL1_DIR_RECV GENMASK(18, 17)
+#define PCIE_GLI_9767_UHS2_CTL1_DIR_RECV_VALUE 0x0
+#define PCIE_GLI_9767_UHS2_CTL1_PDRST BIT(25)
+#define PCIE_GLI_9767_UHS2_CTL1_PDRST_VALUE 0x1
+
+#define PCIE_GLI_9767_UHS2_CTL2 0x964
+#define PCIE_GLI_9767_UHS2_CTL2_ZC GENMASK(3, 0)
+#define PCIE_GLI_9767_UHS2_CTL2_ZC_VALUE 0xb
+#define PCIE_GLI_9767_UHS2_CTL2_ZC_CTL BIT(6)
+#define PCIE_GLI_9767_UHS2_CTL2_ZC_CTL_VALUE 0x1
+#define PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN BIT(13)
+#define PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE BIT(14)
#define GLI_MAX_TUNING_LOOP 40
/* Genesys Logic chipset */
+static void sdhci_gli_mask_replay_timer_timeout(struct pci_dev *pdev)
+{
+ int aer;
+ u32 value;
+
+ /* mask the replay timer timeout of AER */
+ aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ if (aer) {
+ pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
+ value |= PCI_ERR_COR_REP_TIMER;
+ pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value);
+ }
+}
+
static inline void gl9750_wt_on(struct sdhci_host *host)
{
u32 wt_value;
@@ -368,6 +534,19 @@ static void gl9750_set_pll(struct sdhci_host *host, u8 dir, u16 ldiv, u8 pdiv)
mdelay(1);
}
+static bool gl9750_ssc_enable(struct sdhci_host *host)
+{
+ u32 misc;
+ u8 off;
+
+ gl9750_wt_on(host);
+ misc = sdhci_readl(host, SDHCI_GLI_9750_MISC);
+ off = FIELD_GET(SDHCI_GLI_9750_MISC_SSC_OFF, misc);
+ gl9750_wt_off(host);
+
+ return !off;
+}
+
static void gl9750_set_ssc(struct sdhci_host *host, u8 enable, u8 step, u16 ppm)
{
u32 pll;
@@ -389,11 +568,31 @@ static void gl9750_set_ssc(struct sdhci_host *host, u8 enable, u8 step, u16 ppm)
static void gl9750_set_ssc_pll_205mhz(struct sdhci_host *host)
{
- /* set pll to 205MHz and enable ssc */
- gl9750_set_ssc(host, 0x1, 0x1F, 0xFFE7);
+ bool enable = gl9750_ssc_enable(host);
+
+ /* set pll to 205MHz and ssc */
+ gl9750_set_ssc(host, enable, 0xF, 0x5A1D);
gl9750_set_pll(host, 0x1, 0x246, 0x0);
}
+static void gl9750_set_ssc_pll_100mhz(struct sdhci_host *host)
+{
+ bool enable = gl9750_ssc_enable(host);
+
+ /* set pll to 100MHz and ssc */
+ gl9750_set_ssc(host, enable, 0xE, 0x51EC);
+ gl9750_set_pll(host, 0x1, 0x244, 0x1);
+}
+
+static void gl9750_set_ssc_pll_50mhz(struct sdhci_host *host)
+{
+ bool enable = gl9750_ssc_enable(host);
+
+ /* set pll to 50MHz and ssc */
+ gl9750_set_ssc(host, enable, 0xE, 0x51EC);
+ gl9750_set_pll(host, 0x1, 0x244, 0x3);
+}
+
static void sdhci_gl9750_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct mmc_ios *ios = &host->mmc->ios;
@@ -411,6 +610,10 @@ static void sdhci_gl9750_set_clock(struct sdhci_host *host, unsigned int clock)
if (clock == 200000000 && ios->timing == MMC_TIMING_UHS_SDR104) {
host->mmc->actual_clock = 205000000;
gl9750_set_ssc_pll_205mhz(host);
+ } else if (clock == 100000000) {
+ gl9750_set_ssc_pll_100mhz(host);
+ } else if (clock == 50000000) {
+ gl9750_set_ssc_pll_50mhz(host);
}
sdhci_enable_clk(host, clk);
@@ -418,8 +621,12 @@ static void sdhci_gl9750_set_clock(struct sdhci_host *host, unsigned int clock)
static void gl9750_hw_setting(struct sdhci_host *host)
{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev;
u32 value;
+ pdev = slot->chip->pdev;
+
gl9750_wt_on(host);
value = sdhci_readl(host, SDHCI_GLI_9750_CFG2);
@@ -429,6 +636,13 @@ static void gl9750_hw_setting(struct sdhci_host *host)
GLI_9750_CFG2_L1DLY_VALUE);
sdhci_writel(host, value, SDHCI_GLI_9750_CFG2);
+ /* toggle PM state to allow GL9750 to enter ASPM L1.2 */
+ pci_set_power_state(pdev, PCI_D3hot);
+ pci_set_power_state(pdev, PCI_D0);
+
+ /* mask the replay timer timeout of AER */
+ sdhci_gli_mask_replay_timer_timeout(pdev);
+
gl9750_wt_off(host);
}
@@ -511,6 +725,19 @@ static void gl9755_set_pll(struct pci_dev *pdev, u8 dir, u16 ldiv, u8 pdiv)
mdelay(1);
}
+static bool gl9755_ssc_enable(struct pci_dev *pdev)
+{
+ u32 misc;
+ u8 off;
+
+ gl9755_wt_on(pdev);
+ pci_read_config_dword(pdev, PCI_GLI_9755_MISC, &misc);
+ off = FIELD_GET(PCI_GLI_9755_MISC_SSC_OFF, misc);
+ gl9755_wt_off(pdev);
+
+ return !off;
+}
+
static void gl9755_set_ssc(struct pci_dev *pdev, u8 enable, u8 step, u16 ppm)
{
u32 pll;
@@ -532,11 +759,31 @@ static void gl9755_set_ssc(struct pci_dev *pdev, u8 enable, u8 step, u16 ppm)
static void gl9755_set_ssc_pll_205mhz(struct pci_dev *pdev)
{
- /* set pll to 205MHz and enable ssc */
- gl9755_set_ssc(pdev, 0x1, 0x1F, 0xFFE7);
+ bool enable = gl9755_ssc_enable(pdev);
+
+ /* set pll to 205MHz and ssc */
+ gl9755_set_ssc(pdev, enable, 0xF, 0x5A1D);
gl9755_set_pll(pdev, 0x1, 0x246, 0x0);
}
+static void gl9755_set_ssc_pll_100mhz(struct pci_dev *pdev)
+{
+ bool enable = gl9755_ssc_enable(pdev);
+
+ /* set pll to 100MHz and ssc */
+ gl9755_set_ssc(pdev, enable, 0xE, 0x51EC);
+ gl9755_set_pll(pdev, 0x1, 0x244, 0x1);
+}
+
+static void gl9755_set_ssc_pll_50mhz(struct pci_dev *pdev)
+{
+ bool enable = gl9755_ssc_enable(pdev);
+
+ /* set pll to 50MHz and ssc */
+ gl9755_set_ssc(pdev, enable, 0xE, 0x51EC);
+ gl9755_set_pll(pdev, 0x1, 0x244, 0x3);
+}
+
static void sdhci_gl9755_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
@@ -557,6 +804,10 @@ static void sdhci_gl9755_set_clock(struct sdhci_host *host, unsigned int clock)
if (clock == 200000000 && ios->timing == MMC_TIMING_UHS_SDR104) {
host->mmc->actual_clock = 205000000;
gl9755_set_ssc_pll_205mhz(pdev);
+ } else if (clock == 100000000) {
+ gl9755_set_ssc_pll_100mhz(pdev);
+ } else if (clock == 50000000) {
+ gl9755_set_ssc_pll_50mhz(pdev);
}
sdhci_enable_clk(host, clk);
@@ -570,6 +821,14 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
gl9755_wt_on(pdev);
pci_read_config_dword(pdev, PCI_GLI_9755_PECONF, &value);
+ /*
+ * Apple ARM64 platforms using these chips may have
+ * inverted CD/WP detection.
+ */
+ if (of_property_read_bool(pdev->dev.of_node, "cd-inverted"))
+ value |= PCI_GLI_9755_INVERT_CD;
+ if (of_property_read_bool(pdev->dev.of_node, "wp-inverted"))
+ value |= PCI_GLI_9755_INVERT_WP;
value &= ~PCI_GLI_9755_LFCLK;
value &= ~PCI_GLI_9755_DMACLK;
pci_write_config_dword(pdev, PCI_GLI_9755_PECONF, value);
@@ -586,9 +845,721 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
GLI_9755_CFG2_L1DLY_VALUE);
pci_write_config_dword(pdev, PCI_GLI_9755_CFG2, value);
+ /* toggle PM state to allow GL9755 to enter ASPM L1.2 */
+ pci_set_power_state(pdev, PCI_D3hot);
+ pci_set_power_state(pdev, PCI_D0);
+
+ /* mask the replay timer timeout of AER */
+ sdhci_gli_mask_replay_timer_timeout(pdev);
+
gl9755_wt_off(pdev);
}
+static void gl9755_vendor_init(struct sdhci_host *host)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 serdes;
+ u32 pllssc;
+ u32 uhs2_pll;
+
+ gl9755_wt_on(pdev);
+
+ pci_read_config_dword(pdev, PCI_GLI_9755_SerDes, &serdes);
+ serdes &= ~PCI_GLI_9755_UHS2_SERDES_TRAN;
+ serdes |= FIELD_PREP(PCI_GLI_9755_UHS2_SERDES_TRAN,
+ GLI_9755_UHS2_SERDES_TRAN_VALUE);
+ serdes &= ~PCI_GLI_9755_UHS2_SERDES_RECV;
+ serdes |= FIELD_PREP(PCI_GLI_9755_UHS2_SERDES_RECV,
+ GLI_9755_UHS2_SERDES_RECV_VALUE);
+ serdes &= ~PCI_GLI_9755_UHS2_SERDES_INTR;
+ serdes |= FIELD_PREP(PCI_GLI_9755_UHS2_SERDES_INTR,
+ GLI_9755_UHS2_SERDES_INTR_VALUE);
+ serdes &= ~PCI_GLI_9755_UHS2_SERDES_ZC1;
+ serdes |= FIELD_PREP(PCI_GLI_9755_UHS2_SERDES_ZC1,
+ GLI_9755_UHS2_SERDES_ZC1_VALUE);
+ serdes &= ~PCI_GLI_9755_UHS2_SERDES_ZC2;
+ serdes |= FIELD_PREP(PCI_GLI_9755_UHS2_SERDES_ZC2,
+ GLI_9755_UHS2_SERDES_ZC2_DEFAULT);
+ pci_write_config_dword(pdev, PCI_GLI_9755_SerDes, serdes);
+
+ pci_read_config_dword(pdev, PCI_GLI_9755_UHS2_PLL, &uhs2_pll);
+ uhs2_pll &= ~PCI_GLI_9755_UHS2_PLL_SSC;
+ uhs2_pll |= FIELD_PREP(PCI_GLI_9755_UHS2_PLL_SSC,
+ GLI_9755_UHS2_PLL_SSC_VALUE);
+ uhs2_pll &= ~PCI_GLI_9755_UHS2_PLL_DELAY;
+ uhs2_pll |= FIELD_PREP(PCI_GLI_9755_UHS2_PLL_DELAY,
+ GLI_9755_UHS2_PLL_DELAY_VALUE);
+ uhs2_pll &= ~PCI_GLI_9755_UHS2_PLL_PDRST;
+ uhs2_pll |= FIELD_PREP(PCI_GLI_9755_UHS2_PLL_PDRST,
+ GLI_9755_UHS2_PLL_PDRST_VALUE);
+ pci_write_config_dword(pdev, PCI_GLI_9755_UHS2_PLL, uhs2_pll);
+
+ pci_read_config_dword(pdev, PCI_GLI_9755_PLLSSC, &pllssc);
+ pllssc &= ~PCI_GLI_9755_PLLSSC_RTL;
+ pllssc |= FIELD_PREP(PCI_GLI_9755_PLLSSC_RTL,
+ GLI_9755_PLLSSC_RTL_VALUE);
+ pllssc &= ~PCI_GLI_9755_PLLSSC_TRANS_PASS;
+ pllssc |= FIELD_PREP(PCI_GLI_9755_PLLSSC_TRANS_PASS,
+ GLI_9755_PLLSSC_TRANS_PASS_VALUE);
+ pllssc &= ~PCI_GLI_9755_PLLSSC_RECV;
+ pllssc |= FIELD_PREP(PCI_GLI_9755_PLLSSC_RECV,
+ GLI_9755_PLLSSC_RECV_VALUE);
+ pllssc &= ~PCI_GLI_9755_PLLSSC_TRAN;
+ pllssc |= FIELD_PREP(PCI_GLI_9755_PLLSSC_TRAN,
+ GLI_9755_PLLSSC_TRAN_VALUE);
+ pci_write_config_dword(pdev, PCI_GLI_9755_PLLSSC, pllssc);
+
+ gl9755_wt_off(pdev);
+}
+
+static void sdhci_gli_pre_detect_init(struct sdhci_host *host)
+{
+ /* Need more time on UHS2 detect flow */
+ sdhci_writeb(host, 0xA7, SDHCI_UHS2_TIMER_CTRL);
+}
+
+static void sdhci_gli_overcurrent_event_enable(struct sdhci_host *host, bool enable)
+{
+ u32 mask;
+
+ mask = sdhci_readl(host, SDHCI_SIGNAL_ENABLE);
+ if (enable)
+ mask |= SDHCI_INT_BUS_POWER;
+ else
+ mask &= ~SDHCI_INT_BUS_POWER;
+
+ sdhci_writel(host, mask, SDHCI_SIGNAL_ENABLE);
+
+ mask = sdhci_readl(host, SDHCI_INT_ENABLE);
+ if (enable)
+ mask |= SDHCI_INT_BUS_POWER;
+ else
+ mask &= ~SDHCI_INT_BUS_POWER;
+
+ sdhci_writel(host, mask, SDHCI_INT_ENABLE);
+}
+
+static void gl9755_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ u8 pwr = 0;
+
+ if (mode != MMC_POWER_OFF) {
+ pwr = sdhci_get_vdd_value(vdd);
+ if (!pwr)
+ WARN(1, "%s: Invalid vdd %#x\n", mmc_hostname(host->mmc), vdd);
+ pwr |= SDHCI_VDD2_POWER_180;
+ }
+
+ if (host->pwr == pwr)
+ return;
+
+ host->pwr = pwr;
+
+ if (pwr == 0) {
+ sdhci_gli_overcurrent_event_enable(host, false);
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ } else {
+ sdhci_gli_overcurrent_event_enable(host, false);
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
+ pwr |= (SDHCI_POWER_ON | SDHCI_VDD2_POWER_ON);
+
+ sdhci_writeb(host, pwr & 0xf, SDHCI_POWER_CONTROL);
+ /* wait stable */
+ mdelay(5);
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ /* wait stable */
+ mdelay(5);
+ sdhci_gli_overcurrent_event_enable(host, true);
+ }
+}
+
+static bool sdhci_wait_clock_stable(struct sdhci_host *host)
+{
+ u16 clk = 0;
+
+ if (read_poll_timeout_atomic(sdhci_readw, clk, (clk & SDHCI_CLOCK_INT_STABLE),
+ 10, 20000, false, host, SDHCI_CLOCK_CONTROL)) {
+ pr_err("%s: Internal clock never stabilised.\n", mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ return false;
+ }
+ return true;
+}
+
+static void sdhci_gli_enable_internal_clock(struct sdhci_host *host)
+{
+ u16 ctrl2;
+
+ ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+ sdhci_writew(host, SDHCI_CLOCK_INT_EN, SDHCI_CLOCK_CONTROL);
+
+ if (!((ctrl2 & SDHCI_CTRL_V4_MODE) &&
+ (ctrl2 & SDHCI_CTRL_UHS2_ENABLE))) {
+ sdhci_wait_clock_stable(host);
+ sdhci_writew(host, SDHCI_CTRL_V4_MODE, SDHCI_HOST_CONTROL2);
+ }
+}
+
+static int sdhci_gli_wait_software_reset_done(struct sdhci_host *host, u8 mask)
+{
+ u8 rst;
+
+ /* hw clears the bit when it's done */
+ if (read_poll_timeout_atomic(sdhci_readb, rst, !(rst & mask),
+ 10, 100000, false, host, SDHCI_SOFTWARE_RESET)) {
+ pr_err("%s: Reset 0x%x never completed.\n", mmc_hostname(host->mmc), (int)mask);
+ sdhci_dumpregs(host);
+ /* manual clear */
+ sdhci_writeb(host, 0, SDHCI_SOFTWARE_RESET);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void sdhci_gli_uhs2_reset_sd_tran(struct sdhci_host *host)
+{
+ /* do this on UHS2 mode */
+ if (host->mmc->uhs2_sd_tran) {
+ sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD);
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ sdhci_uhs2_clear_set_irqs(host,
+ SDHCI_INT_ALL_MASK,
+ SDHCI_UHS2_INT_ERROR_MASK);
+ }
+}
+
+static void sdhci_gl9755_reset(struct sdhci_host *host, u8 mask)
+{
+ /* need internal clock */
+ if (mask & SDHCI_RESET_ALL)
+ sdhci_gli_enable_internal_clock(host);
+
+ sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
+
+ /* reset sd-tran in UHS2 mode when cmd/data need to be reset */
+ if ((mask & SDHCI_RESET_CMD) | (mask & SDHCI_RESET_DATA))
+ sdhci_gli_uhs2_reset_sd_tran(host);
+
+ if (mask & SDHCI_RESET_ALL)
+ host->clock = 0;
+
+ sdhci_gli_wait_software_reset_done(host, mask);
+}
+
+static inline void gl9767_vhs_read(struct pci_dev *pdev)
+{
+ u32 vhs_enable;
+ u32 vhs_value;
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_VHS, &vhs_value);
+ vhs_enable = FIELD_GET(GLI_9767_VHS_REV, vhs_value);
+
+ if (vhs_enable == GLI_9767_VHS_REV_R)
+ return;
+
+ vhs_value &= ~GLI_9767_VHS_REV;
+ vhs_value |= FIELD_PREP(GLI_9767_VHS_REV, GLI_9767_VHS_REV_R);
+
+ pci_write_config_dword(pdev, PCIE_GLI_9767_VHS, vhs_value);
+}
+
+static inline void gl9767_vhs_write(struct pci_dev *pdev)
+{
+ u32 vhs_enable;
+ u32 vhs_value;
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_VHS, &vhs_value);
+ vhs_enable = FIELD_GET(GLI_9767_VHS_REV, vhs_value);
+
+ if (vhs_enable == GLI_9767_VHS_REV_W)
+ return;
+
+ vhs_value &= ~GLI_9767_VHS_REV;
+ vhs_value |= FIELD_PREP(GLI_9767_VHS_REV, GLI_9767_VHS_REV_W);
+
+ pci_write_config_dword(pdev, PCIE_GLI_9767_VHS, vhs_value);
+}
+
+static bool gl9767_ssc_enable(struct pci_dev *pdev)
+{
+ u32 value;
+ u8 enable;
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_COM_MAILBOX, &value);
+ enable = FIELD_GET(PCIE_GLI_9767_COM_MAILBOX_SSC_EN, value);
+
+ gl9767_vhs_read(pdev);
+
+ return enable;
+}
+
+static void gl9767_set_ssc(struct pci_dev *pdev, u8 enable, u8 step, u16 ppm)
+{
+ u32 pll;
+ u32 ssc;
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, &pll);
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL2, &ssc);
+ pll &= ~(PCIE_GLI_9767_SD_PLL_CTL_SSC_STEP_SETTING |
+ PCIE_GLI_9767_SD_PLL_CTL_SSC_EN);
+ ssc &= ~PCIE_GLI_9767_SD_PLL_CTL2_PLLSSC_PPM;
+ pll |= FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL_SSC_STEP_SETTING, step) |
+ FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL_SSC_EN, enable);
+ ssc |= FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL2_PLLSSC_PPM, ppm);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL2, ssc);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, pll);
+
+ gl9767_vhs_read(pdev);
+}
+
+static void gl9767_set_pll(struct pci_dev *pdev, u8 dir, u16 ldiv, u8 pdiv)
+{
+ u32 pll;
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, &pll);
+ pll &= ~(PCIE_GLI_9767_SD_PLL_CTL_PLL_LDIV |
+ PCIE_GLI_9767_SD_PLL_CTL_PLL_PDIV |
+ PCIE_GLI_9767_SD_PLL_CTL_PLL_DIR_EN);
+ pll |= FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL_PLL_LDIV, ldiv) |
+ FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL_PLL_PDIV, pdiv) |
+ FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL_PLL_DIR_EN, dir);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, pll);
+
+ gl9767_vhs_read(pdev);
+
+ /* wait for pll stable */
+ usleep_range(1000, 1100);
+}
+
+static void gl9767_set_ssc_pll_205mhz(struct pci_dev *pdev)
+{
+ bool enable = gl9767_ssc_enable(pdev);
+
+ /* set pll to 205MHz and ssc */
+ gl9767_set_ssc(pdev, enable, 0x1F, 0xF5C3);
+ gl9767_set_pll(pdev, 0x1, 0x246, 0x0);
+}
+
+static void gl9767_disable_ssc_pll(struct pci_dev *pdev)
+{
+ u32 pll;
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, &pll);
+ pll &= ~(PCIE_GLI_9767_SD_PLL_CTL_PLL_DIR_EN | PCIE_GLI_9767_SD_PLL_CTL_SSC_EN);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, pll);
+
+ gl9767_vhs_read(pdev);
+}
+
+static void gl9767_set_low_power_negotiation(struct pci_dev *pdev, bool enable)
+{
+ u32 value;
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
+ if (enable)
+ value &= ~PCIE_GLI_9767_CFG_LOW_PWR_OFF;
+ else
+ value |= PCIE_GLI_9767_CFG_LOW_PWR_OFF;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
+
+ gl9767_vhs_read(pdev);
+}
+
+static void sdhci_gl9767_uhs2_phy_reset(struct sdhci_host *host, bool assert)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value, set, clr;
+
+ if (assert) {
+ /* Assert reset, set RESETN and clean RESETN_VALUE */
+ set = PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN;
+ clr = PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE;
+ } else {
+ /* De-assert reset, clean RESETN and set RESETN_VALUE */
+ set = PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE;
+ clr = PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN;
+ }
+
+ gl9767_vhs_write(pdev);
+ pci_read_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, &value);
+ value |= set;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, value);
+ value &= ~clr;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, value);
+ gl9767_vhs_read(pdev);
+}
+
+static void __gl9767_uhs2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
+{
+ u8 pwr = 0;
+
+ if (mode != MMC_POWER_OFF) {
+ pwr = sdhci_get_vdd_value(vdd);
+ if (!pwr)
+ WARN(1, "%s: Invalid vdd %#x\n",
+ mmc_hostname(host->mmc), vdd);
+ pwr |= SDHCI_VDD2_POWER_180;
+ }
+
+ if (host->pwr == pwr)
+ return;
+
+ host->pwr = pwr;
+
+ if (pwr == 0) {
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ } else {
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
+ pwr |= SDHCI_POWER_ON;
+ sdhci_writeb(host, pwr & 0xf, SDHCI_POWER_CONTROL);
+ usleep_range(5000, 6250);
+
+ /* Assert reset */
+ sdhci_gl9767_uhs2_phy_reset(host, true);
+ pwr |= SDHCI_VDD2_POWER_ON;
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ usleep_range(5000, 6250);
+ }
+}
+
+static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct mmc_ios *ios = &host->mmc->ios;
+ struct pci_dev *pdev;
+ u16 clk;
+
+ pdev = slot->chip->pdev;
+ host->mmc->actual_clock = 0;
+
+ gl9767_set_low_power_negotiation(pdev, false);
+ gl9767_disable_ssc_pll(pdev);
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0) {
+ gl9767_set_low_power_negotiation(pdev, true);
+ return;
+ }
+
+ clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+ if (clock == 200000000 && ios->timing == MMC_TIMING_UHS_SDR104) {
+ host->mmc->actual_clock = 205000000;
+ gl9767_set_ssc_pll_205mhz(pdev);
+ }
+
+ sdhci_enable_clk(host, clk);
+
+ if (mmc_card_uhs2(host->mmc))
+ /* De-assert reset */
+ sdhci_gl9767_uhs2_phy_reset(host, false);
+
+ gl9767_set_low_power_negotiation(pdev, true);
+}
+
+static void sdhci_gl9767_set_card_detect_debounce_time(struct sdhci_host *host)
+{
+ u32 value;
+
+ value = sdhci_readl(host, SDHCI_GLI_9767_SD_HOST_OPERATION_CTL);
+ value &= ~(SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_DEBOUNCE |
+ SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_DEBOUNCE_SCALE);
+ if (sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)
+ value |= FIELD_PREP(SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_DEBOUNCE,
+ SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_DEBOUNCE_PLUG_IN_VALUE) |
+ FIELD_PREP(SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_DEBOUNCE_SCALE,
+ SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_DEBOUNCE_SCALE_1MS);
+ else
+ value |= FIELD_PREP(SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_DEBOUNCE,
+ SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_DEBOUNCE_PLUG_OUT_VALUE) |
+ FIELD_PREP(SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_DEBOUNCE_SCALE,
+ SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_DEBOUNCE_SCALE_10MS);
+ sdhci_writel(host, value, SDHCI_GLI_9767_SD_HOST_OPERATION_CTL);
+}
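Reading the field names literally (an interpretation, not spelled out in the patch): the debounce period appears to be the DEBOUNCE count times the selected scale, so a populated slot gets 0x05 * 1 ms = 5 ms of plug-in debounce while an empty slot gets 0x3F * 10 ms = 630 ms of plug-out debounce, re-evaluated from the ->card_event() hook below on every card-detect change.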
+
+static void sdhci_gl9767_card_event(struct sdhci_host *host)
+{
+ sdhci_gl9767_set_card_detect_debounce_time(host);
+}
+
+static void gli_set_9767(struct sdhci_host *host)
+{
+ u32 value;
+
+ value = sdhci_readl(host, SDHCI_GLI_9767_GM_BURST_SIZE);
+ value &= ~SDHCI_GLI_9767_GM_BURST_SIZE_AXI_ALWAYS_SET;
+ sdhci_writel(host, value, SDHCI_GLI_9767_GM_BURST_SIZE);
+
+ value = sdhci_readl(host, SDHCI_GLI_9767_SD_HOST_OPERATION_CTL);
+ value &= ~SDHCI_GLI_9767_SD_HOST_OPERATION_CTL_CMD_CONFLICT_CHECK;
+ sdhci_writel(host, value, SDHCI_GLI_9767_SD_HOST_OPERATION_CTL);
+
+ sdhci_gl9767_set_card_detect_debounce_time(host);
+}
+
+static void gl9767_hw_setting(struct sdhci_pci_slot *slot)
+{
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value;
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_PWR_MACRO_CTL, &value);
+ value &= ~(PCIE_GLI_9767_PWR_MACRO_CTL_LOW_VOLTAGE |
+ PCIE_GLI_9767_PWR_MACRO_CTL_LD0_LOW_OUTPUT_VOLTAGE |
+ PCIE_GLI_9767_PWR_MACRO_CTL_RCLK_AMPLITUDE_CTL);
+
+ value |= PCIE_GLI_9767_PWR_MACRO_CTL_LOW_VOLTAGE |
+ FIELD_PREP(PCIE_GLI_9767_PWR_MACRO_CTL_LD0_LOW_OUTPUT_VOLTAGE,
+ PCIE_GLI_9767_PWR_MACRO_CTL_LD0_LOW_OUTPUT_VOLTAGE_VALUE) |
+ FIELD_PREP(PCIE_GLI_9767_PWR_MACRO_CTL_RCLK_AMPLITUDE_CTL,
+ PCIE_GLI_9767_PWR_MACRO_CTL_RCLK_AMPLITUDE_CTL_VALUE);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_PWR_MACRO_CTL, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SCR, &value);
+ value &= ~(PCIE_GLI_9767_SCR_SYSTEM_CLK_SELECT_MODE0 |
+ PCIE_GLI_9767_SCR_SYSTEM_CLK_SELECT_MODE1 |
+ PCIE_GLI_9767_SCR_CFG_RST_DATA_LINK_DOWN);
+
+ value |= PCIE_GLI_9767_SCR_AUTO_AXI_W_BURST |
+ PCIE_GLI_9767_SCR_AUTO_AXI_R_BURST |
+ PCIE_GLI_9767_SCR_AXI_REQ |
+ PCIE_GLI_9767_SCR_CARD_DET_PWR_SAVING_EN |
+ PCIE_GLI_9767_SCR_CORE_PWR_D3_OFF;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SCR, value);
+
+ gl9767_vhs_read(pdev);
+}
+
+static void sdhci_gl9767_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value;
+
+ /* need internal clock */
+ if (mask & SDHCI_RESET_ALL) {
+ sdhci_gli_enable_internal_clock(host);
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_RESET_REG, &value);
+ value &= ~PCIE_GLI_9767_RESET_REG_SD_HOST_SW_RESET;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_RESET_REG, value);
+
+ if (read_poll_timeout_atomic(pci_read_config_dword, value,
+ !(value & PCIE_GLI_9767_RESET_REG_SD_HOST_SW_RESET),
+ 1, 5, true, pdev, PCIE_GLI_9767_RESET_REG, &value)) {
+ pr_warn("%s: %s: Reset SDHC AHB and TL-AMBA failure.\n",
+ __func__, mmc_hostname(host->mmc));
+ gl9767_vhs_read(pdev);
+ return;
+ }
+ gl9767_vhs_read(pdev);
+ }
+
+ if (mmc_card_uhs2(host->mmc)) {
+ if (mask & (SDHCI_RESET_CMD | SDHCI_RESET_DATA)) {
+ sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
+ sdhci_gli_uhs2_reset_sd_tran(host);
+ sdhci_gli_wait_software_reset_done(host, mask);
+ } else {
+ sdhci_uhs2_reset(host, mask);
+ }
+ } else {
+ sdhci_reset(host, mask);
+ }
+
+ gli_set_9767(host);
+}
+
+static int gl9767_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev;
+ u32 value;
+ int i;
+
+ pdev = slot->chip->pdev;
+
+ if (mmc->ops->get_ro(mmc)) {
+ mmc->ios.timing &= ~(MMC_TIMING_SD_EXP | MMC_TIMING_SD_EXP_1_2V);
+ return 0;
+ }
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_COMBO_MUX_CTL, &value);
+ value &= ~(PCIE_GLI_9767_COMBO_MUX_CTL_RST_EN | PCIE_GLI_9767_COMBO_MUX_CTL_WAIT_PERST_EN);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_COMBO_MUX_CTL, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SD_DATA_MULTI_CTL, &value);
+ value &= ~PCIE_GLI_9767_SD_DATA_MULTI_CTL_DISCONNECT_TIME;
+ value |= FIELD_PREP(PCIE_GLI_9767_SD_DATA_MULTI_CTL_DISCONNECT_TIME,
+ PCIE_GLI_9767_SD_DATA_MULTI_CTL_DISCONNECT_TIME_VALUE);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SD_DATA_MULTI_CTL, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2, &value);
+ value |= PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2_SDEI_COMPLETE;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_EN_REG2, &value);
+ value |= PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_EN_REG2_SDEI_COMPLETE_STATUS_EN;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_EN_REG2, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_SIGNAL_EN_REG2, &value);
+ value |= PCIE_GLI_9767_NORMAL_ERR_INT_SIGNAL_EN_REG2_SDEI_COMPLETE_SIGNAL_EN;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_SIGNAL_EN_REG2, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
+ value |= PCIE_GLI_9767_CFG_LOW_PWR_OFF;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
+
+ value = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ value &= ~(SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_PLL_EN);
+ sdhci_writew(host, value, SDHCI_CLOCK_CONTROL);
+
+ value = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ value |= (SDHCI_VDD2_POWER_180 | SDHCI_VDD2_POWER_ON);
+ sdhci_writeb(host, value, SDHCI_POWER_CONTROL);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SD_EXPRESS_CTL, &value);
+ value |= PCIE_GLI_9767_SD_EXPRESS_CTL_SDEI_EXE;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SD_EXPRESS_CTL, value);
+
+ for (i = 0; i < 2; i++) {
+ usleep_range(10000, 10100);
+ pci_read_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2, &value);
+ if (value & PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2_SDEI_COMPLETE) {
+ pci_write_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2,
+ value);
+ break;
+ }
+ }
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SDHC_CAP, &value);
+ if (value & PCIE_GLI_9767_SDHC_CAP_SDEI_RESULT) {
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SD_EXPRESS_CTL, &value);
+ value |= PCIE_GLI_9767_SD_EXPRESS_CTL_SD_EXPRESS_MODE;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SD_EXPRESS_CTL, value);
+ } else {
+ mmc->ios.timing &= ~(MMC_TIMING_SD_EXP | MMC_TIMING_SD_EXP_1_2V);
+
+ value = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ value &= ~(SDHCI_VDD2_POWER_180 | SDHCI_VDD2_POWER_ON);
+ sdhci_writeb(host, value, SDHCI_POWER_CONTROL);
+
+ value = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ value |= (SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_PLL_EN);
+ sdhci_writew(host, value, SDHCI_CLOCK_CONTROL);
+ }
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
+ value &= ~PCIE_GLI_9767_CFG_LOW_PWR_OFF;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
+ gl9767_vhs_read(pdev);
+
+ return 0;
+}
+
+static void gl9767_vendor_init(struct sdhci_host *host)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value;
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_UHS2_PHY_SET_REG1, &value);
+ value |= FIELD_PREP(PCIE_GLI_9767_UHS2_PHY_SET_REG1_SERDES_INTR,
+ PCIE_GLI_9767_UHS2_PHY_SET_REG1_SERDES_INTR_VALUE);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_PHY_SET_REG1, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_UHS2_PHY_SET_REG2, &value);
+ value |= FIELD_PREP(PCIE_GLI_9767_UHS2_PHY_SET_REG2_SSC_PPM_SETTING,
+ PCIE_GLI_9767_UHS2_PHY_SET_REG2_SSC_PPM_SETTING_VALUE);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_PHY_SET_REG2, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL1, &value);
+ value |= FIELD_PREP(PCIE_GLI_9767_UHS2_CTL1_TRANS_PASS,
+ PCIE_GLI_9767_UHS2_CTL1_TRANS_PASS_VALUE) |
+ FIELD_PREP(PCIE_GLI_9767_UHS2_CTL1_DECODING_CTL,
+ PCIE_GLI_9767_UHS2_CTL1_DECODING_CTL_VALUE) |
+ FIELD_PREP(PCIE_GLI_9767_UHS2_CTL1_SERDES_TRAN,
+ PCIE_GLI_9767_UHS2_CTL1_SERDES_TRAN_VALUE) |
+ FIELD_PREP(PCIE_GLI_9767_UHS2_CTL1_SERDES_RECV,
+ PCIE_GLI_9767_UHS2_CTL1_SERDES_RECV_VALUE) |
+ FIELD_PREP(PCIE_GLI_9767_UHS2_CTL1_DIR_TRANS,
+ PCIE_GLI_9767_UHS2_CTL1_DIR_TRANS_VALUE) |
+ FIELD_PREP(PCIE_GLI_9767_UHS2_CTL1_DIR_RECV,
+ PCIE_GLI_9767_UHS2_CTL1_DIR_RECV_VALUE) |
+ FIELD_PREP(PCIE_GLI_9767_UHS2_CTL1_PDRST,
+ PCIE_GLI_9767_UHS2_CTL1_PDRST_VALUE);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL1, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, &value);
+ value |= FIELD_PREP(PCIE_GLI_9767_UHS2_CTL2_ZC,
+ PCIE_GLI_9767_UHS2_CTL2_ZC_VALUE) |
+ FIELD_PREP(PCIE_GLI_9767_UHS2_CTL2_ZC_CTL,
+ PCIE_GLI_9767_UHS2_CTL2_ZC_CTL_VALUE);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, value);
+
+ gl9767_vhs_read(pdev);
+}
+
+static void sdhci_gl9767_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value;
+
+ if (mmc_card_uhs2(host->mmc)) {
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SD_DATA_MULTI_CTL, &value);
+ value |= PCIE_GLI_9767_SD_DATA_MULTI_CTL_SELECT_UHS2 |
+ PCIE_GLI_9767_SD_DATA_MULTI_CTL_UHS2_SWITCH_CTL;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SD_DATA_MULTI_CTL, value);
+
+ gl9767_vhs_read(pdev);
+
+ sdhci_gli_overcurrent_event_enable(host, false);
+ __gl9767_uhs2_set_power(host, mode, vdd);
+ sdhci_gli_overcurrent_event_enable(host, true);
+ } else {
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SD_DATA_MULTI_CTL, &value);
+ value &= ~(PCIE_GLI_9767_SD_DATA_MULTI_CTL_SELECT_UHS2 |
+ PCIE_GLI_9767_SD_DATA_MULTI_CTL_UHS2_SWITCH_CTL);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SD_DATA_MULTI_CTL, value);
+
+ gl9767_vhs_read(pdev);
+
+ sdhci_gli_overcurrent_event_enable(host, false);
+ sdhci_set_power(host, mode, vdd);
+ sdhci_gli_overcurrent_event_enable(host, true);
+ }
+}
+
static int gli_probe_slot_gl9750(struct sdhci_pci_slot *slot)
{
struct sdhci_host *host = slot->host;
@@ -609,6 +1580,23 @@ static int gli_probe_slot_gl9755(struct sdhci_pci_slot *slot)
gli_pcie_enable_msi(slot);
slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
sdhci_enable_v4_mode(host);
+ gl9755_vendor_init(host);
+
+ return 0;
+}
+
+static int gli_probe_slot_gl9767(struct sdhci_pci_slot *slot)
+{
+ struct sdhci_host *host = slot->host;
+
+ gli_set_9767(host);
+ gl9767_hw_setting(slot);
+ gli_pcie_enable_msi(slot);
+ slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
+ host->mmc->caps2 |= MMC_CAP2_SD_EXP;
+ host->mmc_host_ops.init_sd_express = gl9767_init_sd_express;
+ sdhci_enable_v4_mode(host);
+ gl9767_vendor_init(host);
return 0;
}
@@ -636,6 +1624,25 @@ static void sdhci_gli_voltage_switch(struct sdhci_host *host)
usleep_range(100000, 110000);
}
+static void sdhci_gl9767_voltage_switch(struct sdhci_host *host)
+{
+ /*
+ * According to Section 3.6.1 signal voltage switch procedure in
+ * SD Host Controller Simplified Spec. 4.20, steps 6~8 are as
+ * follows:
+ * (6) Set 1.8V Signal Enable in the Host Control 2 register.
+ * (7) Wait 5ms. 1.8V voltage regulator shall be stable within this
+ * period.
+ * (8) If 1.8V Signal Enable is cleared by Host Controller, go to
+ * step (12).
+ *
+ * Wait 5ms after setting 1.8V Signal Enable in the Host Control 2
+ * register to ensure the enable bit has been latched by the GL9767.
+ *
+ */
+ usleep_range(5000, 5500);
+}
+
static void sdhci_gl9750_reset(struct sdhci_host *host, u8 mask)
{
sdhci_reset(host, mask);
@@ -653,42 +1660,6 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg)
return value;
}
-#ifdef CONFIG_PM_SLEEP
-static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
-{
- struct sdhci_pci_slot *slot = chip->slots[0];
-
- pci_free_irq_vectors(slot->chip->pdev);
- gli_pcie_enable_msi(slot);
-
- return sdhci_pci_resume_host(chip);
-}
-
-static int sdhci_cqhci_gli_resume(struct sdhci_pci_chip *chip)
-{
- struct sdhci_pci_slot *slot = chip->slots[0];
- int ret;
-
- ret = sdhci_pci_gli_resume(chip);
- if (ret)
- return ret;
-
- return cqhci_resume(slot->host->mmc);
-}
-
-static int sdhci_cqhci_gli_suspend(struct sdhci_pci_chip *chip)
-{
- struct sdhci_pci_slot *slot = chip->slots[0];
- int ret;
-
- ret = cqhci_suspend(slot->host->mmc);
- if (ret)
- return ret;
-
- return sdhci_suspend_host(slot->host);
-}
-#endif
-
static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
struct mmc_ios *ios)
{
@@ -704,6 +1675,32 @@ static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
sdhci_writel(host, val, SDHCI_GLI_9763E_HS400_ES_REG);
}
+static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot,
+ bool enable)
+{
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value;
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+ value &= ~GLI_9763E_VHS_REV;
+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
+
+ if (enable)
+ value &= ~GLI_9763E_CFG_LPSN_DIS;
+ else
+ value |= GLI_9763E_CFG_LPSN_DIS;
+
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+ value &= ~GLI_9763E_VHS_REV;
+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+}
+
static void sdhci_set_gl9763e_signaling(struct sdhci_host *host,
unsigned int timing)
{
@@ -812,6 +1809,9 @@ static int gl9763e_add_host(struct sdhci_pci_slot *slot)
if (ret)
goto cleanup;
+ /* Disable LPM negotiation to avoid entering L1 state. */
+ gl9763e_set_low_power_negotiation(slot, false);
+
return 0;
cleanup:
@@ -819,15 +1819,7 @@ cleanup:
return ret;
}
-static void sdhci_gl9763e_reset(struct sdhci_host *host, u8 mask)
-{
- if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) &&
- host->mmc->cqe_private)
- cqhci_deactivate(host->mmc);
- sdhci_reset(host, mask);
-}
-
-static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
+static void gl9763e_hw_setting(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
u32 value;
@@ -856,12 +1848,129 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
value |= FIELD_PREP(GLI_9763E_HS400_RXDLY, GLI_9763E_HS400_RXDLY_5);
pci_write_config_dword(pdev, PCIE_GLI_9763E_CLKRXDLY, value);
+	/* Mask the AER replay timer timeout */
+ sdhci_gli_mask_replay_timer_timeout(pdev);
+
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
}
+#ifdef CONFIG_PM
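+/*
+ * Runtime PM: suspend re-enables L1 (LPM) negotiation and gates the PLL
+ * and card clocks; resume re-enables the PLL, waits for it to stabilise,
+ * re-enables the card clock and disables L1 negotiation again.
+ */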
+static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ struct sdhci_host *host = slot->host;
+ u16 clock;
+
+ /* Enable LPM negotiation to allow entering L1 state */
+ gl9763e_set_low_power_negotiation(slot, true);
+
+ clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clock &= ~(SDHCI_CLOCK_PLL_EN | SDHCI_CLOCK_CARD_EN);
+ sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
+
+ return 0;
+}
+
+static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ struct sdhci_host *host = slot->host;
+ u16 clock;
+
+ if (host->mmc->ios.power_mode != MMC_POWER_ON)
+ return 0;
+
+ clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+
+ clock |= SDHCI_CLOCK_PLL_EN;
+ clock &= ~SDHCI_CLOCK_INT_STABLE;
+ sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
+
+ /* Wait max 150 ms */
+ if (read_poll_timeout(sdhci_readw, clock, (clock & SDHCI_CLOCK_INT_STABLE),
+ 1000, 150000, false, host, SDHCI_CLOCK_CONTROL)) {
+ pr_err("%s: PLL clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ }
+
+ clock |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
+
+ /* Disable LPM negotiation to avoid entering L1 state. */
+ gl9763e_set_low_power_negotiation(slot, false);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+
+ pci_free_irq_vectors(slot->chip->pdev);
+ gli_pcie_enable_msi(slot);
+
+ return sdhci_pci_resume_host(chip);
+}
+
+static int gl9763e_resume(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ int ret;
+
+ ret = sdhci_pci_gli_resume(chip);
+ if (ret)
+ return ret;
+
+ ret = cqhci_resume(slot->host->mmc);
+ if (ret)
+ return ret;
+
+ /*
+ * Disable LPM negotiation to bring device back in sync
+ * with its runtime_pm state.
+ */
+ gl9763e_set_low_power_negotiation(slot, false);
+
+ return 0;
+}
+
+static int gl9763e_suspend(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ int ret;
+
+ /*
+ * Certain SoCs can suspend only with the bus in low-
+ * power state, notably x86 SoCs when using S0ix.
+	 * Re-enable LPM negotiation so the link can enter L1 and the
+	 * system can suspend.
+ */
+ gl9763e_set_low_power_negotiation(slot, true);
+
+ ret = cqhci_suspend(slot->host->mmc);
+ if (ret)
+ goto err_suspend;
+
+ ret = sdhci_suspend_host(slot->host);
+ if (ret)
+ goto err_suspend_host;
+
+ return 0;
+
+err_suspend_host:
+ cqhci_resume(slot->host->mmc);
+err_suspend:
+ gl9763e_set_low_power_negotiation(slot, false);
+ return ret;
+}
+#endif
+
static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
@@ -885,25 +1994,53 @@ static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
gli_pcie_enable_msi(slot);
host->mmc_host_ops.hs400_enhanced_strobe =
gl9763e_hs400_enhanced_strobe;
- gli_set_gl9763e(slot);
+ gl9763e_hw_setting(slot);
sdhci_enable_v4_mode(host);
return 0;
}
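+/*
+ * Bit offset of a sub-dword register within its containing 32-bit word,
+ * i.e. (reg & 3) * 8; the helpers below read the full dword and shift.
+ */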
+#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
+
+static u16 sdhci_gli_readw(struct sdhci_host *host, int reg)
+{
+ u32 val = readl(host->ioaddr + (reg & ~3));
+ u16 word;
+
+ word = (val >> REG_OFFSET_IN_BITS(reg)) & 0xffff;
+ return word;
+}
+
+static u8 sdhci_gli_readb(struct sdhci_host *host, int reg)
+{
+ u32 val = readl(host->ioaddr + (reg & ~3));
+ u8 byte = (val >> REG_OFFSET_IN_BITS(reg)) & 0xff;
+
+ return byte;
+}
+
static const struct sdhci_ops sdhci_gl9755_ops = {
+ .read_w = sdhci_gli_readw,
+ .read_b = sdhci_gli_readb,
.set_clock = sdhci_gl9755_set_clock,
+ .set_power = gl9755_set_power,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = sdhci_gl9755_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.voltage_switch = sdhci_gli_voltage_switch,
+ .dump_uhs2_regs = sdhci_uhs2_dump_regs,
+ .set_timeout = sdhci_uhs2_set_timeout,
+ .irq = sdhci_uhs2_irq,
+ .uhs2_pre_detect_init = sdhci_gli_pre_detect_init,
};
const struct sdhci_pci_fixes sdhci_gl9755 = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
.probe_slot = gli_probe_slot_gl9755,
+ .add_host = sdhci_pci_uhs2_add_host,
+ .remove_host = sdhci_pci_uhs2_remove_host,
.ops = &sdhci_gl9755_ops,
#ifdef CONFIG_PM_SLEEP
.resume = sdhci_pci_gli_resume,
@@ -911,6 +2048,8 @@ const struct sdhci_pci_fixes sdhci_gl9755 = {
};
static const struct sdhci_ops sdhci_gl9750_ops = {
+ .read_w = sdhci_gli_readw,
+ .read_b = sdhci_gli_readb,
.read_l = sdhci_gl9750_readl,
.set_clock = sdhci_gl9750_set_clock,
.enable_dma = sdhci_pci_enable_dma,
@@ -935,7 +2074,7 @@ static const struct sdhci_ops sdhci_gl9763e_ops = {
.set_clock = sdhci_set_clock,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_gl9763e_reset,
+ .reset = sdhci_and_cqhci_reset,
.set_uhs_signaling = sdhci_set_gl9763e_signaling,
.voltage_switch = sdhci_gli_voltage_switch,
.irq = sdhci_gl9763e_cqhci_irq,
@@ -946,8 +2085,40 @@ const struct sdhci_pci_fixes sdhci_gl9763e = {
.probe_slot = gli_probe_slot_gl9763e,
.ops = &sdhci_gl9763e_ops,
#ifdef CONFIG_PM_SLEEP
- .resume = sdhci_cqhci_gli_resume,
- .suspend = sdhci_cqhci_gli_suspend,
+ .resume = gl9763e_resume,
+ .suspend = gl9763e_suspend,
+#endif
+#ifdef CONFIG_PM
+ .runtime_suspend = gl9763e_runtime_suspend,
+ .runtime_resume = gl9763e_runtime_resume,
+ .allow_runtime_pm = true,
#endif
.add_host = gl9763e_add_host,
};
+
+static const struct sdhci_ops sdhci_gl9767_ops = {
+ .set_clock = sdhci_gl9767_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_gl9767_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .voltage_switch = sdhci_gl9767_voltage_switch,
+ .dump_uhs2_regs = sdhci_uhs2_dump_regs,
+ .set_timeout = sdhci_uhs2_set_timeout,
+ .irq = sdhci_uhs2_irq,
+ .set_power = sdhci_gl9767_set_power,
+ .uhs2_pre_detect_init = sdhci_gli_pre_detect_init,
+ .card_event = sdhci_gl9767_card_event,
+};
+
+const struct sdhci_pci_fixes sdhci_gl9767 = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
+ .probe_slot = gli_probe_slot_gl9767,
+ .add_host = sdhci_pci_uhs2_add_host,
+ .remove_host = sdhci_pci_uhs2_remove_host,
+ .ops = &sdhci_gl9767_ops,
+#ifdef CONFIG_PM_SLEEP
+ .resume = sdhci_pci_gli_resume,
+#endif
+};
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 51d55a87aebe..058bef1c7e41 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -12,6 +12,7 @@
#include <linux/mmc/mmc.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
+#include <linux/bitfield.h>
#include "sdhci.h"
#include "sdhci-pci.h"
@@ -20,6 +21,7 @@
* O2Micro device registers
*/
+#define O2_SD_PCIE_SWITCH 0x54
#define O2_SD_MISC_REG5 0x64
#define O2_SD_LD0_CTRL 0x68
#define O2_SD_DEV_CTRL 0x88
@@ -31,9 +33,11 @@
#define O2_SD_CAPS 0xE0
#define O2_SD_ADMA1 0xE2
#define O2_SD_ADMA2 0xE7
+#define O2_SD_MISC_CTRL2 0xF0
#define O2_SD_INF_MOD 0xF1
#define O2_SD_MISC_CTRL4 0xFC
#define O2_SD_MISC_CTRL 0x1C0
+#define O2_SD_EXP_INT_REG 0x1E0
#define O2_SD_PWR_FORCE_L0 0x0002
#define O2_SD_TUNING_CTRL 0x300
#define O2_SD_PLL_SETTING 0x304
@@ -43,12 +47,19 @@
#define O2_SD_CAP_REG0 0x334
#define O2_SD_UHS1_CAP_SETTING 0x33C
#define O2_SD_DELAY_CTRL 0x350
+#define O2_SD_OUTPUT_CLK_SOURCE_SWITCH 0x354
#define O2_SD_UHS2_L1_CTRL 0x35C
#define O2_SD_FUNC_REG3 0x3E0
#define O2_SD_FUNC_REG4 0x3E4
+#define O2_SD_PARA_SET_REG1 0x444
+#define O2_SD_VDDX_CTRL_REG 0x508
+#define O2_SD_GPIO_CTRL_REG1 0x510
#define O2_SD_LED_ENABLE BIT(6)
#define O2_SD_FREG0_LEDOFF BIT(13)
+#define O2_SD_SEL_DLL BIT(16)
#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22)
+#define O2_SD_PHASE_MASK GENMASK(23, 20)
+#define O2_SD_FIX_PHASE FIELD_PREP(O2_SD_PHASE_MASK, 0x9)
#define O2_SD_VENDOR_SETTING 0x110
#define O2_SD_VENDOR_SETTING2 0x1C8
@@ -147,6 +158,8 @@ static int sdhci_o2_get_cd(struct mmc_host *mmc)
if (!(sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1) & O2_PLL_LOCK_STATUS))
sdhci_o2_enable_internal_clock(host);
+ else
+ sdhci_o2_wait_card_detect_stable(host);
return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
@@ -301,20 +314,24 @@ static int sdhci_o2_dll_recovery(struct sdhci_host *host)
static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct sdhci_pci_chip *chip = slot->chip;
int current_bus_width = 0;
u32 scratch32 = 0;
u16 scratch = 0;
+ u8 scratch_8 = 0;
+ u32 reg_val;
/*
- * This handler only implements the eMMC tuning that is specific to
+ * This handler implements the hardware tuning that is specific to
* this controller. Fall back to the standard method for other TIMING.
*/
if ((host->timing != MMC_TIMING_MMC_HS200) &&
- (host->timing != MMC_TIMING_UHS_SDR104))
+ (host->timing != MMC_TIMING_UHS_SDR104) &&
+ (host->timing != MMC_TIMING_UHS_SDR50))
return sdhci_execute_tuning(mmc, opcode);
- if (WARN_ON((opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
- (opcode != MMC_SEND_TUNING_BLOCK)))
+ if (WARN_ON(!mmc_op_tuning(opcode)))
return -EINVAL;
/* Force power mode enter L0 */
@@ -322,6 +339,46 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
scratch |= O2_SD_PWR_FORCE_L0;
sdhci_writew(host, scratch, O2_SD_MISC_CTRL);
+ /* Update output phase */
+ switch (chip->pdev->device) {
+ case PCI_DEVICE_ID_O2_SDS0:
+ case PCI_DEVICE_ID_O2_SEABIRD0:
+ case PCI_DEVICE_ID_O2_SEABIRD1:
+ case PCI_DEVICE_ID_O2_SDS1:
+ case PCI_DEVICE_ID_O2_FUJIN2:
+ /* Stop clk */
+ reg_val = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ reg_val &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, reg_val, SDHCI_CLOCK_CONTROL);
+
+ if (host->timing == MMC_TIMING_MMC_HS200 ||
+ host->timing == MMC_TIMING_UHS_SDR104) {
+ /* UnLock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
+ scratch_8 &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+
+			/* Set PCR 0x354[16] to select the DLL clock and set the default phase */
+ pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &reg_val);
+ reg_val &= ~(O2_SD_SEL_DLL | O2_SD_PHASE_MASK);
+ reg_val |= (O2_SD_SEL_DLL | O2_SD_FIX_PHASE);
+ pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, reg_val);
+
+ /* Lock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
+ scratch_8 |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+ }
+
+ /* Start clk */
+ reg_val = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ reg_val |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, reg_val, SDHCI_CLOCK_CONTROL);
+ break;
+ default:
+ break;
+ }
+
/* wait DLL lock, timeout value 5ms */
if (readx_poll_timeout(sdhci_o2_pll_dll_wdt_control, host,
scratch32, (scratch32 & O2_DLL_LOCK_STATUS), 1, 5000))
@@ -489,7 +546,7 @@ static void sdhci_pci_o2_enable_msi(struct sdhci_pci_chip *chip,
ret = pci_find_capability(chip->pdev, PCI_CAP_ID_MSI);
if (!ret) {
- pr_info("%s: unsupport msi, use INTx irq\n",
+ pr_info("%s: unsupported MSI, use INTx irq\n",
mmc_hostname(host->mmc));
return;
}
@@ -523,6 +580,7 @@ static void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock)
u16 clk;
u8 scratch;
u32 scratch_32;
+ u32 dmdn_208m, dmdn_200m;
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct sdhci_pci_chip *chip = slot->chip;
@@ -533,27 +591,137 @@ static void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock)
if (clock == 0)
return;
- if ((host->timing == MMC_TIMING_UHS_SDR104) && (clock == 200000000)) {
- pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ /* UnLock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+ if (chip->pdev->device == PCI_DEVICE_ID_O2_GG8_9860 ||
+ chip->pdev->device == PCI_DEVICE_ID_O2_GG8_9861 ||
+ chip->pdev->device == PCI_DEVICE_ID_O2_GG8_9862 ||
+ chip->pdev->device == PCI_DEVICE_ID_O2_GG8_9863) {
+ dmdn_208m = 0x2c500000;
+ dmdn_200m = 0x25200000;
+ } else {
+ dmdn_208m = 0x2c280000;
+ dmdn_200m = 0x25100000;
+ }
- scratch &= 0x7f;
- pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ if ((host->timing == MMC_TIMING_UHS_SDR104) && (clock == 200000000)) {
+ pci_read_config_dword(chip->pdev, O2_SD_PLL_SETTING, &scratch_32);
+ if ((scratch_32 & 0xFFFF0000) != dmdn_208m)
+ o2_pci_set_baseclk(chip, dmdn_208m);
+ } else {
pci_read_config_dword(chip->pdev, O2_SD_PLL_SETTING, &scratch_32);
- if ((scratch_32 & 0xFFFF0000) != 0x2c280000)
- o2_pci_set_baseclk(chip, 0x2c280000);
+ if ((scratch_32 & 0xFFFF0000) != dmdn_200m)
+ o2_pci_set_baseclk(chip, dmdn_200m);
+ }
- pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32);
+ scratch_32 &= ~(O2_SD_SEL_DLL | O2_SD_PHASE_MASK);
+ pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32);
- scratch |= 0x80;
- pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
- }
+ /* Lock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
sdhci_o2_enable_clk(host, clk);
}
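+/*
+ * SD Express hand-off: gate the SD clock, power VDD2 (1.2V or 1.8V), then
+ * wait up to 30ms for the card to assert CLKREQ#. On success the slot is
+ * switched to PCIe mode; otherwise VDD2 is powered off and the card is
+ * kept in UHS-I mode.
+ */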
+static int sdhci_pci_o2_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct sdhci_pci_chip *chip = slot->chip;
+ u8 scratch8;
+ u16 scratch16;
+ int ret;
+
+ /* Disable clock */
+ sdhci_writeb(host, 0, SDHCI_CLOCK_CONTROL);
+
+	/* Set VDD2 voltage */
+ scratch8 = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ scratch8 &= 0x0F;
+ if (host->mmc->ios.timing == MMC_TIMING_SD_EXP_1_2V &&
+ host->mmc->caps2 & MMC_CAP2_SD_EXP_1_2V) {
+ scratch8 |= SDHCI_VDD2_POWER_ON | SDHCI_VDD2_POWER_120;
+ } else {
+ scratch8 |= SDHCI_VDD2_POWER_ON | SDHCI_VDD2_POWER_180;
+ }
+
+ sdhci_writeb(host, scratch8, SDHCI_POWER_CONTROL);
+
+ /* UnLock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch8);
+ scratch8 &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch8);
+
+	/* Wait for the Express card to assert CLKREQ# */
+ ret = read_poll_timeout(sdhci_readb, scratch8, !(scratch8 & BIT(0)),
+ 1, 30000, false, host, O2_SD_EXP_INT_REG);
+
+ if (!ret) {
+ /* Switch to PCIe mode */
+ scratch16 = sdhci_readw(host, O2_SD_PCIE_SWITCH);
+ scratch16 |= BIT(8);
+ sdhci_writew(host, scratch16, O2_SD_PCIE_SWITCH);
+ } else {
+		/* Power off VDD2 voltage */
+ scratch8 = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ scratch8 &= 0x0F;
+ sdhci_writeb(host, scratch8, SDHCI_POWER_CONTROL);
+
+		/* Keep the card in UHS-I mode */
+ pci_read_config_word(chip->pdev, O2_SD_PARA_SET_REG1, &scratch16);
+ scratch16 &= ~BIT(11);
+ pci_write_config_word(chip->pdev, O2_SD_PARA_SET_REG1, scratch16);
+
+ host->mmc->ios.timing = MMC_TIMING_LEGACY;
+ pr_info("%s: Express card initialization failed, falling back to Legacy\n",
+ mmc_hostname(host->mmc));
+ }
+ /* Lock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch8);
+ scratch8 |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch8);
+
+ return 0;
+}
+
+static void sdhci_pci_o2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
+{
+ struct sdhci_pci_chip *chip;
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ u32 scratch_32 = 0;
+ u8 scratch_8 = 0;
+
+ chip = slot->chip;
+
+ if (mode == MMC_POWER_OFF) {
+ /* UnLock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
+ scratch_8 &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+
+ /* Set PCR 0x354[16] to switch Clock Source back to OPE Clock */
+ pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32);
+ scratch_32 &= ~(O2_SD_SEL_DLL);
+ pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32);
+
+ /* Lock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
+ scratch_8 |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+ }
+
+ sdhci_set_power(host, mode, vdd);
+}
+
static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
{
struct sdhci_pci_chip *chip;
@@ -575,6 +743,11 @@ static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
if (caps & SDHCI_CAN_DO_8BIT)
host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_DDR50;
+
+ sdhci_pci_o2_enable_msi(chip, host);
+
+ host->mmc_host_ops.execute_tuning = sdhci_o2_execute_tuning;
switch (chip->pdev->device) {
case PCI_DEVICE_ID_O2_SDS0:
case PCI_DEVICE_ID_O2_SEABIRD0:
@@ -585,8 +758,6 @@ static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
if (reg & 0x1)
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
- sdhci_pci_o2_enable_msi(chip, host);
-
if (chip->pdev->device == PCI_DEVICE_ID_O2_SEABIRD0) {
ret = pci_read_config_dword(chip->pdev,
O2_SD_MISC_SETTING, &reg);
@@ -612,15 +783,22 @@ static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
}
- host->mmc_host_ops.execute_tuning = sdhci_o2_execute_tuning;
-
if (chip->pdev->device != PCI_DEVICE_ID_O2_FUJIN2)
break;
/* set dll watch dog timer */
reg = sdhci_readl(host, O2_SD_VENDOR_SETTING2);
reg |= (1 << 12);
sdhci_writel(host, reg, O2_SD_VENDOR_SETTING2);
-
+ break;
+ case PCI_DEVICE_ID_O2_GG8_9860:
+ case PCI_DEVICE_ID_O2_GG8_9861:
+ case PCI_DEVICE_ID_O2_GG8_9862:
+ case PCI_DEVICE_ID_O2_GG8_9863:
+ host->mmc->caps2 |= MMC_CAP2_NO_SDIO | MMC_CAP2_SD_EXP | MMC_CAP2_SD_EXP_1_2V;
+ host->mmc->caps |= MMC_CAP_HW_RESET;
+ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+ slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd;
+ host->mmc_host_ops.init_sd_express = sdhci_pci_o2_init_sd_express;
break;
default:
break;
@@ -633,6 +811,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
{
int ret;
u8 scratch;
+ u16 scratch16;
u32 scratch_32;
switch (chip->pdev->device) {
@@ -644,7 +823,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@@ -655,7 +834,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_CLKREQ, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x20;
pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
@@ -664,7 +843,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
*/
ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x01;
pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
@@ -677,7 +856,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_INF_MOD, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x08;
pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
@@ -685,7 +864,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
@@ -696,7 +875,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@@ -707,7 +886,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_FUNC_REG0,
&scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 = ((scratch_32 & 0xFF000000) >> 24);
/* Check Whether subId is 0x11 or 0x12 */
@@ -719,7 +898,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_FUNC_REG4,
&scratch_32);
if (ret)
- return ret;
+ goto read_fail;
/* Enable Base Clk setting change */
scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET;
@@ -742,7 +921,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_CLK_SETTING, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 &= ~(0xFF00);
scratch_32 |= 0x07E0C800;
@@ -752,14 +931,14 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_CLKREQ, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 |= 0x3;
pci_write_config_dword(chip->pdev, O2_SD_CLKREQ, scratch_32);
ret = pci_read_config_dword(chip->pdev,
O2_SD_PLL_SETTING, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 &= ~(0x1F3F070E);
scratch_32 |= 0x18270106;
@@ -770,7 +949,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_CAP_REG2, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 &= ~(0xE0);
pci_write_config_dword(chip->pdev,
O2_SD_CAP_REG2, scratch_32);
@@ -782,7 +961,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
@@ -792,7 +971,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@@ -800,7 +979,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_PLL_SETTING, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
if ((scratch_32 & 0xff000000) == 0x01000000) {
scratch_32 &= 0x0000FFFF;
@@ -819,7 +998,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_FUNC_REG4,
&scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 |= (1 << 22);
pci_write_config_dword(chip->pdev,
O2_SD_FUNC_REG4, scratch_32);
@@ -828,17 +1007,66 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
/* Set Tuning Windows to 5 */
pci_write_config_byte(chip->pdev,
O2_SD_TUNING_CTRL, 0x55);
+		/* Adjust 1st and 2nd CD debounce time */
+ pci_read_config_dword(chip->pdev, O2_SD_MISC_CTRL2, &scratch_32);
+ scratch_32 &= 0xFFE7FFFF;
+ scratch_32 |= 0x00180000;
+ pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL2, scratch_32);
+ pci_write_config_dword(chip->pdev, O2_SD_DETECT_SETTING, 1);
/* Lock WP */
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ break;
+ case PCI_DEVICE_ID_O2_GG8_9860:
+ case PCI_DEVICE_ID_O2_GG8_9861:
+ case PCI_DEVICE_ID_O2_GG8_9862:
+ case PCI_DEVICE_ID_O2_GG8_9863:
+ /* UnLock WP */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ goto read_fail;
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+ /* Select mode switch source as software control */
+ pci_read_config_word(chip->pdev, O2_SD_PARA_SET_REG1, &scratch16);
+ scratch16 &= 0xF8FF;
+ scratch16 |= BIT(9);
+ pci_write_config_word(chip->pdev, O2_SD_PARA_SET_REG1, scratch16);
+
+ /* set VDD1 supply source */
+ pci_read_config_word(chip->pdev, O2_SD_VDDX_CTRL_REG, &scratch16);
+ scratch16 &= 0xFFE3;
+ scratch16 |= BIT(3);
+ pci_write_config_word(chip->pdev, O2_SD_VDDX_CTRL_REG, scratch16);
+
+		/* Set host drive strength */
+		scratch16 = 0x0025;
+		pci_write_config_word(chip->pdev, O2_SD_PLL_SETTING, scratch16);
+
+		/* Set output delay */
+ pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32);
+ scratch_32 &= 0xFF0FFF00;
+ scratch_32 |= 0x00B0003B;
+ pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32);
+
+ /* Lock WP */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
}
return 0;
+
+read_fail:
+ return pcibios_err_to_errno(ret);
}
#ifdef CONFIG_PM_SLEEP
@@ -855,6 +1083,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_pci_o2_set_power,
};
const struct sdhci_pci_fixes sdhci_o2 = {
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 8f90c4163bb5..f38f0bd4165c 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -11,6 +11,10 @@
#define PCI_DEVICE_ID_O2_FUJIN2 0x8520
#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620
#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621
+#define PCI_DEVICE_ID_O2_GG8_9860 0x9860
+#define PCI_DEVICE_ID_O2_GG8_9861 0x9861
+#define PCI_DEVICE_ID_O2_GG8_9862 0x9862
+#define PCI_DEVICE_ID_O2_GG8_9863 0x9863
#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809
#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a
@@ -59,6 +63,7 @@
#define PCI_DEVICE_ID_INTEL_JSL_SD 0x4df8
#define PCI_DEVICE_ID_INTEL_LKF_EMMC 0x98c4
#define PCI_DEVICE_ID_INTEL_LKF_SD 0x98f8
+#define PCI_DEVICE_ID_INTEL_ADL_EMMC 0x54c4
#define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000
#define PCI_DEVICE_ID_VIA_95D0 0x95d0
@@ -75,6 +80,7 @@
#define PCI_DEVICE_ID_GLI_9755 0x9755
#define PCI_DEVICE_ID_GLI_9750 0x9750
#define PCI_DEVICE_ID_GLI_9763E 0xe763
+#define PCI_DEVICE_ID_GLI_9767 0x9767
/*
* PCI device class and mask
@@ -139,6 +145,7 @@ struct sdhci_pci_fixes {
int (*probe_slot) (struct sdhci_pci_slot *);
int (*add_host) (struct sdhci_pci_slot *);
void (*remove_slot) (struct sdhci_pci_slot *, int);
+ void (*remove_host) (struct sdhci_pci_slot *, int);
#ifdef CONFIG_PM_SLEEP
int (*suspend) (struct sdhci_pci_chip *);
@@ -150,17 +157,13 @@ struct sdhci_pci_fixes {
#endif
const struct sdhci_ops *ops;
+ const struct dmi_system_id *cd_gpio_override;
size_t priv_size;
};
struct sdhci_pci_slot {
struct sdhci_pci_chip *chip;
struct sdhci_host *host;
- struct sdhci_pci_data *data;
-
- int rst_n_gpio;
- int cd_gpio;
- int cd_irq;
int cd_idx;
bool cd_override_level;
@@ -188,6 +191,8 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot)
return (void *)slot->private;
}
+int sdhci_pci_uhs2_add_host(struct sdhci_pci_slot *slot);
+void sdhci_pci_uhs2_remove_host(struct sdhci_pci_slot *slot, int dead);
#ifdef CONFIG_PM_SLEEP
int sdhci_pci_resume_host(struct sdhci_pci_chip *chip);
#endif
@@ -199,5 +204,6 @@ extern const struct sdhci_pci_fixes sdhci_o2;
extern const struct sdhci_pci_fixes sdhci_gl9750;
extern const struct sdhci_pci_fixes sdhci_gl9755;
extern const struct sdhci_pci_fixes sdhci_gl9763e;
+extern const struct sdhci_pci_fixes sdhci_gl9767;
#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-pic32.c b/drivers/mmc/host/sdhci-pic32.c
index 6696b6bdd88e..7ddac0befed8 100644
--- a/drivers/mmc/host/sdhci-pic32.c
+++ b/drivers/mmc/host/sdhci-pic32.c
@@ -157,20 +157,20 @@ static int pic32_sdhci_probe(struct platform_device *pdev)
ret = plat_data->setup_dma(ADMA_FIFO_RD_THSHLD,
ADMA_FIFO_WR_THSHLD);
if (ret)
- goto err_host;
+ goto err;
}
sdhci_pdata->sys_clk = devm_clk_get(&pdev->dev, "sys_clk");
if (IS_ERR(sdhci_pdata->sys_clk)) {
ret = PTR_ERR(sdhci_pdata->sys_clk);
dev_err(&pdev->dev, "Error getting clock\n");
- goto err_host;
+ goto err;
}
ret = clk_prepare_enable(sdhci_pdata->sys_clk);
if (ret) {
dev_err(&pdev->dev, "Error enabling clock\n");
- goto err_host;
+ goto err;
}
sdhci_pdata->base_clk = devm_clk_get(&pdev->dev, "base_clk");
@@ -203,14 +203,12 @@ err_base_clk:
clk_disable_unprepare(sdhci_pdata->base_clk);
err_sys_clk:
clk_disable_unprepare(sdhci_pdata->sys_clk);
-err_host:
- sdhci_pltfm_free(pdev);
err:
dev_err(&pdev->dev, "pic32-sdhci probe failed: %d\n", ret);
return ret;
}
-static int pic32_sdhci_remove(struct platform_device *pdev)
+static void pic32_sdhci_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct pic32_sdhci_priv *sdhci_pdata = sdhci_priv(host);
@@ -220,9 +218,6 @@ static int pic32_sdhci_remove(struct platform_device *pdev)
sdhci_remove_host(host, scratch == (u32)~0);
clk_disable_unprepare(sdhci_pdata->base_clk);
clk_disable_unprepare(sdhci_pdata->sys_clk);
- sdhci_pltfm_free(pdev);
-
- return 0;
}
static const struct of_device_id pic32_sdhci_id_table[] = {
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 328b132bbe57..7f6ac636f040 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -19,7 +19,6 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/property.h>
-#include <linux/of.h>
#ifdef CONFIG_PPC
#include <asm/machdep.h>
#endif
@@ -54,27 +53,20 @@ static bool sdhci_wp_inverted(struct device *dev)
#endif /* CONFIG_PPC */
}
-#ifdef CONFIG_OF
static void sdhci_get_compatibility(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct sdhci_host *host = platform_get_drvdata(pdev);
- struct device_node *np = pdev->dev.of_node;
-
- if (!np)
- return;
- if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc"))
+ if (device_is_compatible(dev, "fsl,p2020-rev1-esdhc"))
host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
- if (of_device_is_compatible(np, "fsl,p2020-esdhc") ||
- of_device_is_compatible(np, "fsl,p1010-esdhc") ||
- of_device_is_compatible(np, "fsl,t4240-esdhc") ||
- of_device_is_compatible(np, "fsl,mpc8536-esdhc"))
+ if (device_is_compatible(dev, "fsl,p2020-esdhc") ||
+ device_is_compatible(dev, "fsl,p1010-esdhc") ||
+ device_is_compatible(dev, "fsl,t4240-esdhc") ||
+ device_is_compatible(dev, "fsl,mpc8536-esdhc"))
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
}
-#else
-void sdhci_get_compatibility(struct platform_device *pdev) {}
-#endif /* CONFIG_OF */
void sdhci_get_property(struct platform_device *pdev)
{
@@ -119,26 +111,21 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
{
struct sdhci_host *host;
void __iomem *ioaddr;
- int irq, ret;
+ int irq;
ioaddr = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(ioaddr)) {
- ret = PTR_ERR(ioaddr);
- goto err;
- }
+ if (IS_ERR(ioaddr))
+ return ERR_CAST(ioaddr);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err;
- }
+ if (irq < 0)
+ return ERR_PTR(irq);
host = sdhci_alloc_host(&pdev->dev,
sizeof(struct sdhci_pltfm_host) + priv_size);
-
if (IS_ERR(host)) {
- ret = PTR_ERR(host);
- goto err;
+ dev_err(&pdev->dev, "%s failed %pe\n", __func__, host);
+ return ERR_CAST(host);
}
host->ioaddr = ioaddr;
@@ -156,26 +143,14 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
platform_set_drvdata(pdev, host);
return host;
-err:
- dev_err(&pdev->dev, "%s failed %d\n", __func__, ret);
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(sdhci_pltfm_init);
-void sdhci_pltfm_free(struct platform_device *pdev)
-{
- struct sdhci_host *host = platform_get_drvdata(pdev);
-
- sdhci_free_host(host);
-}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_free);
-
-int sdhci_pltfm_register(struct platform_device *pdev,
- const struct sdhci_pltfm_data *pdata,
- size_t priv_size)
+int sdhci_pltfm_init_and_add_host(struct platform_device *pdev,
+ const struct sdhci_pltfm_data *pdata,
+ size_t priv_size)
{
struct sdhci_host *host;
- int ret = 0;
host = sdhci_pltfm_init(pdev, pdata, priv_size);
if (IS_ERR(host))
@@ -183,27 +158,18 @@ int sdhci_pltfm_register(struct platform_device *pdev,
sdhci_get_property(pdev);
- ret = sdhci_add_host(host);
- if (ret)
- sdhci_pltfm_free(pdev);
-
- return ret;
+ return sdhci_add_host(host);
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_register);
+EXPORT_SYMBOL_GPL(sdhci_pltfm_init_and_add_host);
-int sdhci_pltfm_unregister(struct platform_device *pdev)
+void sdhci_pltfm_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
sdhci_remove_host(host, dead);
- clk_disable_unprepare(pltfm_host->clk);
- sdhci_pltfm_free(pdev);
-
- return 0;
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
+EXPORT_SYMBOL_GPL(sdhci_pltfm_remove);
#ifdef CONFIG_PM_SLEEP
int sdhci_pltfm_suspend(struct device *dev)
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 9bd717ff784b..9c32e8a289d6 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -97,12 +97,11 @@ static inline void sdhci_get_of_property(struct platform_device *pdev)
extern struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
const struct sdhci_pltfm_data *pdata,
size_t priv_size);
-extern void sdhci_pltfm_free(struct platform_device *pdev);
-extern int sdhci_pltfm_register(struct platform_device *pdev,
- const struct sdhci_pltfm_data *pdata,
- size_t priv_size);
-extern int sdhci_pltfm_unregister(struct platform_device *pdev);
+extern int sdhci_pltfm_init_and_add_host(struct platform_device *pdev,
+ const struct sdhci_pltfm_data *pdata,
+ size_t priv_size);
+extern void sdhci_pltfm_remove(struct platform_device *pdev);
extern unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host);
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index f18906b5575f..76346353dc55 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -19,7 +19,9 @@
#include <linux/platform_data/pxa_sdhci.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/mmc.h>
+#include <linux/pinctrl/consumer.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
@@ -41,6 +43,13 @@
#define MMC_CARD 0x1000
#define MMC_WIDTH 0x0100
+struct sdhci_pxav2_host {
+ struct mmc_request *sdio_mrq;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_cmd_gpio;
+};
+
static void pxav2_reset(struct sdhci_host *host, u8 mask)
{
struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
@@ -80,6 +89,71 @@ static void pxav2_reset(struct sdhci_host *host, u8 mask)
}
}
+static u16 pxav1_readw(struct sdhci_host *host, int reg)
+{
+ /* Workaround for data abort exception on SDH2 and SDH4 on PXA168 */
+ if (reg == SDHCI_HOST_VERSION)
+ return readl(host->ioaddr + SDHCI_HOST_VERSION - 2) >> 16;
+
+ return readw(host->ioaddr + reg);
+}
+
+static u32 pxav1_irq(struct sdhci_host *host, u32 intmask)
+{
+ struct sdhci_pxav2_host *pxav2_host = sdhci_pltfm_priv(sdhci_priv(host));
+ struct mmc_request *sdio_mrq;
+
+ if (pxav2_host->sdio_mrq && (intmask & SDHCI_INT_CMD_MASK)) {
+ /* The dummy CMD0 for the SDIO workaround just completed */
+ sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, SDHCI_INT_STATUS);
+ intmask &= ~SDHCI_INT_CMD_MASK;
+
+ /* Restore MMC function to CMD pin */
+ if (pxav2_host->pinctrl && pxav2_host->pins_default)
+ pinctrl_select_state(pxav2_host->pinctrl, pxav2_host->pins_default);
+
+ sdio_mrq = pxav2_host->sdio_mrq;
+ pxav2_host->sdio_mrq = NULL;
+ mmc_request_done(host->mmc, sdio_mrq);
+ }
+
+ return intmask;
+}
+
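+/*
+ * PXA168 SDIO errata flow: after an SDIO command completes, the bus clock
+ * stops. Reset the data port, switch the CMD pad to its GPIO state, and
+ * issue a dummy CMD0 to restart the clock; the original request is
+ * completed from pxav1_irq() once that dummy command finishes.
+ */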
+static void pxav1_request_done(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ u16 tmp;
+ struct sdhci_pxav2_host *pxav2_host;
+
+ /* If this is an SDIO command, perform errata workaround for silicon bug */
+ if (!mrq->cmd->error &&
+ (mrq->cmd->opcode == SD_IO_RW_DIRECT ||
+ mrq->cmd->opcode == SD_IO_RW_EXTENDED)) {
+ /* Reset data port */
+ tmp = readw(host->ioaddr + SDHCI_TIMEOUT_CONTROL);
+ tmp |= 0x400;
+ writew(tmp, host->ioaddr + SDHCI_TIMEOUT_CONTROL);
+
+ /* Clock is now stopped, so restart it by sending a dummy CMD0 */
+ pxav2_host = sdhci_pltfm_priv(sdhci_priv(host));
+ pxav2_host->sdio_mrq = mrq;
+
+ /* Set CMD as high output rather than MMC function while we do CMD0 */
+ if (pxav2_host->pinctrl && pxav2_host->pins_cmd_gpio)
+ pinctrl_select_state(pxav2_host->pinctrl, pxav2_host->pins_cmd_gpio);
+
+ sdhci_writel(host, 0, SDHCI_ARGUMENT);
+ sdhci_writew(host, 0, SDHCI_TRANSFER_MODE);
+ sdhci_writew(host, SDHCI_MAKE_CMD(MMC_GO_IDLE_STATE, SDHCI_CMD_RESP_NONE),
+ SDHCI_COMMAND);
+
+ /* Don't finish this request until the dummy CMD0 finishes */
+ return;
+ }
+
+ mmc_request_done(host->mmc, mrq);
+}
+
static void pxav2_mmc_set_bus_width(struct sdhci_host *host, int width)
{
u8 ctrl;
@@ -101,6 +175,27 @@ static void pxav2_mmc_set_bus_width(struct sdhci_host *host, int width)
writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
}
+struct sdhci_pxa_variant {
+ const struct sdhci_ops *ops;
+ unsigned int extra_quirks;
+};
+
+static const struct sdhci_ops pxav1_sdhci_ops = {
+ .read_w = pxav1_readw,
+ .set_clock = sdhci_set_clock,
+ .irq = pxav1_irq,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_bus_width = pxav2_mmc_set_bus_width,
+ .reset = pxav2_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .request_done = pxav1_request_done,
+};
+
+static const struct sdhci_pxa_variant __maybe_unused pxav1_variant = {
+ .ops = &pxav1_sdhci_ops,
+ .extra_quirks = SDHCI_QUIRK_NO_BUSY_IRQ | SDHCI_QUIRK_32BIT_DMA_SIZE,
+};
+
static const struct sdhci_ops pxav2_sdhci_ops = {
.set_clock = sdhci_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -109,11 +204,14 @@ static const struct sdhci_ops pxav2_sdhci_ops = {
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
+static const struct sdhci_pxa_variant pxav2_variant = {
+ .ops = &pxav2_sdhci_ops,
+};
+
#ifdef CONFIG_OF
static const struct of_device_id sdhci_pxav2_of_match[] = {
- {
- .compatible = "mrvl,pxav2-mmc",
- },
+ { .compatible = "mrvl,pxav1-mmc", .data = &pxav1_variant, },
+ { .compatible = "mrvl,pxav2-mmc", .data = &pxav2_variant, },
{},
};
MODULE_DEVICE_TABLE(of, sdhci_pxav2_of_match);
@@ -129,7 +227,7 @@ static struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev)
if (!pdata)
return NULL;
- if (of_find_property(np, "non-removable", NULL))
+ if (of_property_read_bool(np, "non-removable"))
pdata->flags |= PXA_FLAG_CARD_PERMANENT;
of_property_read_u32(np, "bus-width", &bus_width);
@@ -155,40 +253,42 @@ static int sdhci_pxav2_probe(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+ struct sdhci_pxav2_host *pxav2_host;
struct device *dev = &pdev->dev;
struct sdhci_host *host = NULL;
- const struct of_device_id *match;
+ const struct sdhci_pxa_variant *variant;
- int ret;
- struct clk *clk;
+ struct clk *clk, *clk_core;
- host = sdhci_pltfm_init(pdev, NULL, 0);
+ host = sdhci_pltfm_init(pdev, NULL, sizeof(*pxav2_host));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
+ pxav2_host = sdhci_pltfm_priv(pltfm_host);
- clk = devm_clk_get(dev, "PXA-SDHCLK");
- if (IS_ERR(clk)) {
- dev_err(dev, "failed to get io clock\n");
- ret = PTR_ERR(clk);
- goto free;
- }
+ clk = devm_clk_get_optional_enabled(dev, "io");
+ if (!clk)
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to get io clock\n");
pltfm_host->clk = clk;
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable io clock\n");
- goto free;
- }
+
+ clk_core = devm_clk_get_optional_enabled(dev, "core");
+ if (IS_ERR(clk_core))
+ return dev_err_probe(dev, PTR_ERR(clk_core),
+ "failed to enable core clock\n");
host->quirks = SDHCI_QUIRK_BROKEN_ADMA
| SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
| SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
- match = of_match_device(of_match_ptr(sdhci_pxav2_of_match), &pdev->dev);
- if (match) {
+ variant = of_device_get_match_data(dev);
+ if (variant)
pdata = pxav2_get_mmc_pdata(dev);
- }
+ else
+ variant = &pxav2_variant;
+
if (pdata) {
if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
/* on-chip device */
@@ -208,19 +308,25 @@ static int sdhci_pxav2_probe(struct platform_device *pdev)
host->mmc->pm_caps |= pdata->pm_caps;
}
- host->ops = &pxav2_sdhci_ops;
-
- ret = sdhci_add_host(host);
- if (ret)
- goto disable_clk;
-
- return 0;
+ host->quirks |= variant->extra_quirks;
+ host->ops = variant->ops;
+
+ /* Set up optional pinctrl for PXA168 SDIO IRQ fix */
+ pxav2_host->pinctrl = devm_pinctrl_get(dev);
+ if (!IS_ERR(pxav2_host->pinctrl)) {
+ pxav2_host->pins_cmd_gpio = pinctrl_lookup_state(pxav2_host->pinctrl,
+ "state_cmd_gpio");
+ if (IS_ERR(pxav2_host->pins_cmd_gpio))
+ pxav2_host->pins_cmd_gpio = NULL;
+ pxav2_host->pins_default = pinctrl_lookup_state(pxav2_host->pinctrl,
+ "default");
+ if (IS_ERR(pxav2_host->pins_default))
+ pxav2_host->pins_default = NULL;
+ } else {
+ pxav2_host->pinctrl = NULL;
+ }
-disable_clk:
- clk_disable_unprepare(clk);
-free:
- sdhci_pltfm_free(pdev);
- return ret;
+ return sdhci_add_host(host);
}
static struct platform_driver sdhci_pxav2_driver = {
@@ -231,7 +337,7 @@ static struct platform_driver sdhci_pxav2_driver = {
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_pxav2_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_pxav2_driver);
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index a6d89a3f1946..d082c4e21aa9 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -20,9 +20,11 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/mbus.h>
+#include <linux/units.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
@@ -51,6 +53,9 @@ struct sdhci_pxa {
struct clk *clk_io;
u8 power_mode;
void __iomem *sdio3_conf_reg;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_uhs;
};
/*
@@ -124,10 +129,8 @@ static int armada_38x_quirks(struct platform_device *pdev,
struct resource *res;
host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
- host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
- host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
- host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ sdhci_read_caps(host);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"conf-sdio3");
@@ -315,8 +318,20 @@ static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
}
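+/* Select the default pad state below 100 MHz and the UHS pad state at or above it */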
+static void pxav3_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pltfm_host *phost = sdhci_priv(host);
+ struct sdhci_pxa *pxa = sdhci_pltfm_priv(phost);
+ struct pinctrl_state *pins = clock < 100 * HZ_PER_MHZ ? pxa->pins_default : pxa->pins_uhs;
+
+ if (pins)
+ pinctrl_select_state(pxa->pinctrl, pins);
+
+ sdhci_set_clock(host, clock);
+}
+
static const struct sdhci_ops pxav3_sdhci_ops = {
- .set_clock = sdhci_set_clock,
+ .set_clock = pxav3_set_clock,
.set_power = pxav3_set_power,
.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -368,6 +383,19 @@ static inline struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
}
#endif
+static struct pinctrl_state *pxav3_lookup_pinstate(struct device *dev, struct pinctrl *pinctrl,
+ const char *name)
+{
+ struct pinctrl_state *pins = pinctrl_lookup_state(pinctrl, name);
+
+ if (IS_ERR(pins)) {
+ dev_dbg(dev, "could not get pinstate '%s': %ld\n", name, PTR_ERR(pins));
+ return NULL;
+ }
+
+ return pins;
+}
+
static int sdhci_pxav3_probe(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
@@ -391,8 +419,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
pxa->clk_io = devm_clk_get(dev, NULL);
if (IS_ERR(pxa->clk_io)) {
dev_err(dev, "failed to get io clock\n");
- ret = PTR_ERR(pxa->clk_io);
- goto err_clk_get;
+ return PTR_ERR(pxa->clk_io);
}
pltfm_host->clk = pxa->clk_io;
clk_prepare_enable(pxa->clk_io);
@@ -401,6 +428,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
if (!IS_ERR(pxa->clk_core))
clk_prepare_enable(pxa->clk_core);
+ host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
/* enable 1/8V DDR capable */
host->mmc->caps |= MMC_CAP_1_8V_DDR;
@@ -442,6 +470,15 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
host->mmc->pm_caps |= pdata->pm_caps;
}
+ pxa->pinctrl = devm_pinctrl_get(dev);
+ if (!IS_ERR(pxa->pinctrl)) {
+ pxa->pins_default = pxav3_lookup_pinstate(dev, pxa->pinctrl, "default");
+ if (pxa->pins_default)
+ pxa->pins_uhs = pxav3_lookup_pinstate(dev, pxa->pinctrl, "state_uhs");
+ } else {
+ dev_dbg(dev, "could not get pinctrl handle: %ld\n", PTR_ERR(pxa->pinctrl));
+ }
+
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS);
@@ -467,12 +504,10 @@ err_of_parse:
err_mbus_win:
clk_disable_unprepare(pxa->clk_io);
clk_disable_unprepare(pxa->clk_core);
-err_clk_get:
- sdhci_pltfm_free(pdev);
return ret;
}
-static int sdhci_pxav3_remove(struct platform_device *pdev)
+static void sdhci_pxav3_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -486,13 +521,8 @@ static int sdhci_pxav3_remove(struct platform_device *pdev)
clk_disable_unprepare(pxa->clk_io);
clk_disable_unprepare(pxa->clk_core);
-
- sdhci_pltfm_free(pdev);
-
- return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int sdhci_pxav3_suspend(struct device *dev)
{
int ret;
@@ -502,7 +532,6 @@ static int sdhci_pxav3_suspend(struct device *dev)
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
ret = sdhci_suspend_host(host);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -515,24 +544,18 @@ static int sdhci_pxav3_resume(struct device *dev)
pm_runtime_get_sync(dev);
ret = sdhci_resume_host(host);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
-#endif
-#ifdef CONFIG_PM
static int sdhci_pxav3_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host);
- int ret;
- ret = sdhci_runtime_suspend_host(host);
- if (ret)
- return ret;
+ sdhci_runtime_suspend_host(host);
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
@@ -554,14 +577,13 @@ static int sdhci_pxav3_runtime_resume(struct device *dev)
if (!IS_ERR(pxa->clk_core))
clk_prepare_enable(pxa->clk_core);
- return sdhci_runtime_resume_host(host, 0);
+ sdhci_runtime_resume_host(host, 0);
+ return 0;
}
-#endif
static const struct dev_pm_ops sdhci_pxav3_pmops = {
- SET_SYSTEM_SLEEP_PM_OPS(sdhci_pxav3_suspend, sdhci_pxav3_resume)
- SET_RUNTIME_PM_OPS(sdhci_pxav3_runtime_suspend,
- sdhci_pxav3_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(sdhci_pxav3_suspend, sdhci_pxav3_resume)
+ RUNTIME_PM_OPS(sdhci_pxav3_runtime_suspend, sdhci_pxav3_runtime_resume, NULL)
};
static struct platform_driver sdhci_pxav3_driver = {
@@ -569,7 +591,7 @@ static struct platform_driver sdhci_pxav3_driver = {
.name = "sdhci-pxav3",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(sdhci_pxav3_of_match),
- .pm = &sdhci_pxav3_pmops,
+ .pm = pm_ptr(&sdhci_pxav3_pmops),
},
.probe = sdhci_pxav3_probe,
.remove = sdhci_pxav3_remove,
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 862f033d235d..6bf66aaa86a6 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -17,11 +17,8 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -133,14 +130,16 @@ struct sdhci_s3c {
* struct sdhci_s3c_drv_data - S3C SDHCI platform specific driver data
* @sdhci_quirks: sdhci host specific quirks.
* @no_divider: no or non-standard internal clock divider.
+ * @ops: sdhci_ops to use for this variant
*
* Specifies platform specific configuration of sdhci controller.
* Note: A structure for driver specific platform data is used for future
* expansion of its usage.
*/
struct sdhci_s3c_drv_data {
- unsigned int sdhci_quirks;
- bool no_divider;
+ unsigned int sdhci_quirks;
+ bool no_divider;
+ const struct sdhci_ops *ops;
};
static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
@@ -415,7 +414,7 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
-static struct sdhci_ops sdhci_s3c_ops = {
+static const struct sdhci_ops sdhci_s3c_ops_s3c6410 = {
.get_max_clock = sdhci_s3c_get_max_clk,
.set_clock = sdhci_s3c_set_clock,
.get_min_clock = sdhci_s3c_get_min_clock,
@@ -424,6 +423,15 @@ static struct sdhci_ops sdhci_s3c_ops = {
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
+static const struct sdhci_ops sdhci_s3c_ops_exynos4 __maybe_unused = {
+ .get_max_clock = sdhci_cmu_get_max_clock,
+ .set_clock = sdhci_cmu_set_clock,
+ .get_min_clock = sdhci_cmu_get_min_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
#ifdef CONFIG_OF
static int sdhci_s3c_parse_dt(struct device *dev,
struct sdhci_host *host, struct s3c_sdhci_platdata *pdata)
@@ -437,17 +445,17 @@ static int sdhci_s3c_parse_dt(struct device *dev,
pdata->max_width = max_width;
/* get the card detection method */
- if (of_get_property(node, "broken-cd", NULL)) {
+ if (of_property_read_bool(node, "broken-cd")) {
pdata->cd_type = S3C_SDHCI_CD_NONE;
return 0;
}
- if (of_get_property(node, "non-removable", NULL)) {
+ if (of_property_read_bool(node, "non-removable")) {
pdata->cd_type = S3C_SDHCI_CD_PERMANENT;
return 0;
}
- if (of_get_named_gpio(node, "cd-gpios", 0))
+ if (of_property_present(node, "cd-gpios"))
return 0;
/* assuming internal card detect that will be configured by pinctrl */
@@ -499,15 +507,13 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
sc = sdhci_priv(host);
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- ret = -ENOMEM;
- goto err_pdata_io_clk;
- }
+ if (!pdata)
+ return -ENOMEM;
if (pdev->dev.of_node) {
ret = sdhci_s3c_parse_dt(&pdev->dev, host, pdata);
if (ret)
- goto err_pdata_io_clk;
+ return ret;
} else {
memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
}
@@ -524,8 +530,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
sc->clk_io = devm_clk_get(dev, "hsmmc");
if (IS_ERR(sc->clk_io)) {
dev_err(dev, "failed to get io clock\n");
- ret = PTR_ERR(sc->clk_io);
- goto err_pdata_io_clk;
+ return PTR_ERR(sc->clk_io);
}
/* enable the local io clock and keep it running for the moment. */
@@ -563,7 +568,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
pdata->cfg_gpio(pdev, pdata->max_width);
host->hw_name = "samsung-hsmmc";
- host->ops = &sdhci_s3c_ops;
+ host->ops = &sdhci_s3c_ops_s3c6410;
host->quirks = 0;
host->quirks2 = 0;
host->irq = irq;
@@ -573,6 +578,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
if (drv_data) {
host->quirks |= drv_data->sdhci_quirks;
+ host->ops = drv_data->ops;
sc->no_divider = drv_data->no_divider;
}
@@ -620,16 +626,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
/* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
- /*
- * If controller does not have internal clock divider,
- * we can use overriding functions instead of default.
- */
- if (sc->no_divider) {
- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
- }
-
/* It supports additional host capabilities if needed */
if (pdata->host_caps)
host->mmc->caps |= pdata->host_caps;
@@ -662,13 +658,10 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
err_no_busclks:
clk_disable_unprepare(sc->clk_io);
- err_pdata_io_clk:
- sdhci_free_host(host);
-
return ret;
}
-static int sdhci_s3c_remove(struct platform_device *pdev)
+static void sdhci_s3c_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_s3c *sc = sdhci_priv(host);
@@ -686,13 +679,8 @@ static int sdhci_s3c_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(sc->clk_io);
-
- sdhci_free_host(host);
-
- return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int sdhci_s3c_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -709,17 +697,14 @@ static int sdhci_s3c_resume(struct device *dev)
return sdhci_resume_host(host);
}
-#endif
-#ifdef CONFIG_PM
static int sdhci_s3c_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_s3c *ourhost = to_s3c(host);
struct clk *busclk = ourhost->clk_io;
- int ret;
- ret = sdhci_runtime_suspend_host(host);
+ sdhci_runtime_suspend_host(host);
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
@@ -727,7 +712,7 @@ static int sdhci_s3c_runtime_suspend(struct device *dev)
if (ourhost->cur_clk >= 0)
clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]);
clk_disable_unprepare(busclk);
- return ret;
+ return 0;
}
static int sdhci_s3c_runtime_resume(struct device *dev)
@@ -735,20 +720,17 @@ static int sdhci_s3c_runtime_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_s3c *ourhost = to_s3c(host);
struct clk *busclk = ourhost->clk_io;
- int ret;
clk_prepare_enable(busclk);
if (ourhost->cur_clk >= 0)
clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]);
- ret = sdhci_runtime_resume_host(host, 0);
- return ret;
+ sdhci_runtime_resume_host(host, 0);
+ return 0;
}
-#endif
static const struct dev_pm_ops sdhci_s3c_pmops = {
- SET_SYSTEM_SLEEP_PM_OPS(sdhci_s3c_suspend, sdhci_s3c_resume)
- SET_RUNTIME_PM_OPS(sdhci_s3c_runtime_suspend, sdhci_s3c_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(sdhci_s3c_suspend, sdhci_s3c_resume)
+ RUNTIME_PM_OPS(sdhci_s3c_runtime_suspend, sdhci_s3c_runtime_resume, NULL)
};
static const struct platform_device_id sdhci_s3c_driver_ids[] = {
@@ -763,6 +745,7 @@ MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids);
#ifdef CONFIG_OF
static const struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
.no_divider = true,
+ .ops = &sdhci_s3c_ops_exynos4,
};
static const struct of_device_id sdhci_s3c_dt_match[] = {
@@ -782,7 +765,7 @@ static struct platform_driver sdhci_s3c_driver = {
.name = "s3c-sdhci",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(sdhci_s3c_dt_match),
- .pm = &sdhci_s3c_pmops,
+ .pm = pm_ptr(&sdhci_s3c_pmops),
},
};
@@ -791,4 +774,3 @@ module_platform_driver(sdhci_s3c_driver);
MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:s3c-sdhci");
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index d463e2fd5b1a..72d21dc0cb69 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -59,15 +59,15 @@ static int sdhci_probe(struct platform_device *pdev)
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret);
- goto err_host;
+ goto err;
}
host->hw_name = "sdhci";
host->ops = &sdhci_pltfm_ops;
host->irq = platform_get_irq(pdev, 0);
- if (host->irq <= 0) {
- ret = -EINVAL;
- goto err_host;
+ if (host->irq < 0) {
+ ret = host->irq;
+ goto err;
}
host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
@@ -78,13 +78,13 @@ static int sdhci_probe(struct platform_device *pdev)
if (IS_ERR(sdhci->clk)) {
ret = PTR_ERR(sdhci->clk);
dev_dbg(&pdev->dev, "Error getting clock\n");
- goto err_host;
+ goto err;
}
ret = clk_prepare_enable(sdhci->clk);
if (ret) {
dev_dbg(&pdev->dev, "Error enabling clock\n");
- goto err_host;
+ goto err;
}
ret = clk_set_rate(sdhci->clk, 50000000);
@@ -110,14 +110,12 @@ static int sdhci_probe(struct platform_device *pdev)
disable_clk:
clk_disable_unprepare(sdhci->clk);
-err_host:
- sdhci_free_host(host);
err:
dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
return ret;
}
-static int sdhci_remove(struct platform_device *pdev)
+static void sdhci_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct spear_sdhci *sdhci = sdhci_priv(host);
@@ -130,12 +128,8 @@ static int sdhci_remove(struct platform_device *pdev)
sdhci_remove_host(host, dead);
clk_disable_unprepare(sdhci->clk);
- sdhci_free_host(host);
-
- return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int sdhci_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -166,24 +160,21 @@ static int sdhci_resume(struct device *dev)
return sdhci_resume_host(host);
}
-#endif
-static SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume);
-#ifdef CONFIG_OF
static const struct of_device_id sdhci_spear_id_table[] = {
{ .compatible = "st,spear300-sdhci" },
{}
};
MODULE_DEVICE_TABLE(of, sdhci_spear_id_table);
-#endif
static struct platform_driver sdhci_driver = {
.driver = {
.name = "sdhci",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sdhci_pm_ops,
- .of_match_table = of_match_ptr(sdhci_spear_id_table),
+ .pm = pm_sleep_ptr(&sdhci_pm_ops),
+ .of_match_table = sdhci_spear_id_table,
},
.probe = sdhci_probe,
.remove = sdhci_remove,
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 11e375579cfb..3584a2b314a9 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -8,10 +8,11 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
+#include <linux/iopoll.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -39,6 +40,9 @@
#define SDHCI_SPRD_BIT_POSRD_DLY_INV BIT(21)
#define SDHCI_SPRD_BIT_NEGRD_DLY_INV BIT(29)
+#define SDHCI_SPRD_REG_32_DLL_STS0 0x210
+#define SDHCI_SPRD_DLL_LOCKED BIT(18)
+
#define SDHCI_SPRD_REG_32_BUSY_POSI 0x250
#define SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN BIT(25)
#define SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN BIT(24)
@@ -69,6 +73,11 @@
#define SDHCI_SPRD_CLK_DEF_RATE 26000000
#define SDHCI_SPRD_PHY_DLL_CLK 52000000
+#define SDHCI_SPRD_MAX_RANGE 0xff
+#define SDHCI_SPRD_CMD_DLY_MASK GENMASK(15, 8)
+#define SDHCI_SPRD_POSRD_DLY_MASK GENMASK(23, 16)
+#define SDHCI_SPRD_CPST_EN GENMASK(27, 24)
+
struct sdhci_sprd_host {
u32 version;
struct clk *clk_sdio;
@@ -82,6 +91,11 @@ struct sdhci_sprd_host {
u32 phy_delay[MMC_TIMING_MMC_HS400 + 2];
};
+enum sdhci_sprd_tuning_type {
+ SDHCI_SPRD_TUNING_SD_HS_CMD,
+ SDHCI_SPRD_TUNING_SD_HS_DATA,
+};
+
struct sdhci_sprd_phy_cfg {
const char *property;
u8 timing;
@@ -201,14 +215,14 @@ static inline u32 sdhci_sprd_calc_div(u32 base_clk, u32 clk)
if ((base_clk / div) > (clk * 2))
div++;
- if (div > SDHCI_SPRD_CLK_MAX_DIV)
- div = SDHCI_SPRD_CLK_MAX_DIV;
-
if (div % 2)
div = (div + 1) / 2;
else
div = div / 2;
+ if (div > SDHCI_SPRD_CLK_MAX_DIV)
+ div = SDHCI_SPRD_CLK_MAX_DIV;
+
return div;
}
@@ -224,13 +238,19 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
sdhci_enable_clk(host, div);
- /* enable auto gate sdhc_enable_auto_gate */
val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
- mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN |
- SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
- if (mask != (val & mask)) {
- val |= mask;
- sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
+ mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
+ /* Enable CLK_AUTO when the clock is greater than 400K. */
+ if (clk > 400000) {
+ if (mask != (val & mask)) {
+ val |= mask;
+ sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
+ }
+ } else {
+ if (val & mask) {
+ val &= ~mask;
+ sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
+ }
}
}
@@ -256,6 +276,15 @@ static void sdhci_sprd_enable_phy_dll(struct sdhci_host *host)
sdhci_writel(host, tmp, SDHCI_SPRD_REG_32_DLL_CFG);
/* wait 1ms */
usleep_range(1000, 1250);
+
+ if (read_poll_timeout(sdhci_readl, tmp, (tmp & SDHCI_SPRD_DLL_LOCKED),
+ 2000, USEC_PER_SEC, false, host, SDHCI_SPRD_REG_32_DLL_STS0)) {
+ pr_err("%s: DLL locked fail!\n", mmc_hostname(host->mmc));
+ pr_info("%s: DLL_STS0 : 0x%x, DLL_CFG : 0x%x\n",
+ mmc_hostname(host->mmc),
+ sdhci_readl(host, SDHCI_SPRD_REG_32_DLL_STS0),
+ sdhci_readl(host, SDHCI_SPRD_REG_32_DLL_CFG));
+ }
}
static void sdhci_sprd_set_clock(struct sdhci_host *host, unsigned int clock)
@@ -296,7 +325,7 @@ static unsigned int sdhci_sprd_get_max_clock(struct sdhci_host *host)
static unsigned int sdhci_sprd_get_min_clock(struct sdhci_host *host)
{
- return 400000;
+ return 100000;
}
static void sdhci_sprd_set_uhs_signaling(struct sdhci_host *host,
@@ -390,12 +419,33 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
mmc_request_done(host->mmc, mrq);
}
-static struct sdhci_ops sdhci_sprd_ops = {
+static void sdhci_sprd_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ switch (mode) {
+ case MMC_POWER_OFF:
+ mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, 0);
+
+ mmc_regulator_disable_vqmmc(mmc);
+ break;
+ case MMC_POWER_ON:
+ mmc_regulator_enable_vqmmc(mmc);
+ break;
+ case MMC_POWER_UP:
+ mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, vdd);
+ break;
+ }
+}
+
+static const struct sdhci_ops sdhci_sprd_ops = {
.read_l = sdhci_sprd_readl,
.write_l = sdhci_sprd_writel,
.write_w = sdhci_sprd_writew,
.write_b = sdhci_sprd_writeb,
.set_clock = sdhci_sprd_set_clock,
+ .set_power = sdhci_sprd_set_power,
.get_max_clock = sdhci_sprd_get_max_clock,
.get_min_clock = sdhci_sprd_get_min_clock,
.set_bus_width = sdhci_set_bus_width,
@@ -457,7 +507,7 @@ static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
}
if (IS_ERR(sprd_host->pinctrl))
- return 0;
+ goto reset;
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_180:
@@ -485,6 +535,8 @@ static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
/* Wait for 300 ~ 500 us for pin state stable */
usleep_range(300, 500);
+
+reset:
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
return 0;
@@ -516,6 +568,139 @@ static void sdhci_sprd_hs400_enhanced_strobe(struct mmc_host *mmc,
SDHCI_SPRD_REG_32_DLL_DLY);
}
+static int mmc_send_tuning_cmd(struct mmc_card *card)
+{
+ return mmc_send_status(card, NULL);
+}
+
+static int mmc_send_tuning_data(struct mmc_card *card)
+{
+ u8 *status;
+ int ret;
+
+ status = kmalloc(64, GFP_KERNEL);
+ if (!status)
+ return -ENOMEM;
+
+ ret = mmc_sd_switch(card, 0, 0, 0, status);
+
+ kfree(status);
+
+ return ret;
+}
+
+static int sdhci_sprd_get_best_clk_sample(struct mmc_host *mmc, u8 *value)
+{
+ int range_end = SDHCI_SPRD_MAX_RANGE;
+ int range_length = 0;
+ int middle_range = 0;
+ int count = 0;
+ int i;
+
+ for (i = 0; i <= SDHCI_SPRD_MAX_RANGE; i++) {
+ if (value[i]) {
+ pr_debug("%s: tuning ok: %d\n", mmc_hostname(mmc), i);
+ count++;
+ } else {
+ pr_debug("%s: tuning fail: %d\n", mmc_hostname(mmc), i);
+ if (range_length < count) {
+ range_length = count;
+ range_end = i - 1;
+ count = 0;
+ }
+ }
+ }
+
+ if (!count)
+ return -EIO;
+
+ if (count > range_length) {
+ range_length = count;
+ range_end = i - 1;
+ }
+
+ middle_range = range_end - (range_length - 1) / 2;
+
+ return middle_range;
+}
+
+static int sdhci_sprd_tuning(struct mmc_host *mmc, struct mmc_card *card,
+ enum sdhci_sprd_tuning_type type)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
+ u32 *p = sprd_host->phy_delay;
+ u32 dll_cfg, dll_dly;
+ int best_clk_sample;
+ int err = 0;
+ u8 *value;
+ int i;
+
+ value = kmalloc(SDHCI_SPRD_MAX_RANGE + 1, GFP_KERNEL);
+ if (!value)
+ return -ENOMEM;
+
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+ dll_cfg = sdhci_readl(host, SDHCI_SPRD_REG_32_DLL_CFG);
+ dll_cfg &= ~SDHCI_SPRD_CPST_EN;
+ sdhci_writel(host, dll_cfg, SDHCI_SPRD_REG_32_DLL_CFG);
+
+ dll_dly = p[mmc->ios.timing];
+
+ for (i = 0; i <= SDHCI_SPRD_MAX_RANGE; i++) {
+ if (type == SDHCI_SPRD_TUNING_SD_HS_CMD) {
+ dll_dly &= ~SDHCI_SPRD_CMD_DLY_MASK;
+ dll_dly |= ((i << 8) & SDHCI_SPRD_CMD_DLY_MASK);
+ } else {
+ dll_dly &= ~SDHCI_SPRD_POSRD_DLY_MASK;
+ dll_dly |= ((i << 16) & SDHCI_SPRD_POSRD_DLY_MASK);
+ }
+
+ sdhci_writel(host, dll_dly, SDHCI_SPRD_REG_32_DLL_DLY);
+
+ if (type == SDHCI_SPRD_TUNING_SD_HS_CMD)
+ value[i] = !mmc_send_tuning_cmd(card);
+ else
+ value[i] = !mmc_send_tuning_data(card);
+ }
+
+ best_clk_sample = sdhci_sprd_get_best_clk_sample(mmc, value);
+ if (best_clk_sample < 0) {
+ dev_err(mmc_dev(host->mmc), "all tuning phase fail!\n");
+ err = best_clk_sample;
+ goto out;
+ }
+
+ if (type == SDHCI_SPRD_TUNING_SD_HS_CMD) {
+ p[mmc->ios.timing] &= ~SDHCI_SPRD_CMD_DLY_MASK;
+ p[mmc->ios.timing] |= ((best_clk_sample << 8) & SDHCI_SPRD_CMD_DLY_MASK);
+ } else {
+ p[mmc->ios.timing] &= ~(SDHCI_SPRD_POSRD_DLY_MASK);
+ p[mmc->ios.timing] |= ((best_clk_sample << 16) & SDHCI_SPRD_POSRD_DLY_MASK);
+ }
+
+ pr_debug("%s: the best clk sample %d, delay value 0x%08x\n",
+ mmc_hostname(host->mmc), best_clk_sample, p[mmc->ios.timing]);
+
+out:
+ sdhci_writel(host, p[mmc->ios.timing], SDHCI_SPRD_REG_32_DLL_DLY);
+
+ kfree(value);
+
+ return err;
+}
+
+static int sdhci_sprd_prepare_sd_hs_cmd_tuning(struct mmc_host *mmc, struct mmc_card *card)
+{
+ return sdhci_sprd_tuning(mmc, card, SDHCI_SPRD_TUNING_SD_HS_CMD);
+}
+
+static int sdhci_sprd_execute_sd_hs_data_tuning(struct mmc_host *mmc, struct mmc_card *card)
+{
+ return sdhci_sprd_tuning(mmc, card, SDHCI_SPRD_TUNING_SD_HS_DATA);
+}
+
static void sdhci_sprd_phy_param_parse(struct sdhci_sprd_host *sprd_host,
struct device_node *np)
{
@@ -536,8 +721,7 @@ static void sdhci_sprd_phy_param_parse(struct sdhci_sprd_host *sprd_host,
static const struct sdhci_pltfm_data sdhci_sprd_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
- SDHCI_QUIRK_MISSING_CAPS,
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
.quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
SDHCI_QUIRK2_USE_32BIT_BLK_CNT |
SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
@@ -561,6 +745,11 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
host->mmc_host_ops.request = sdhci_sprd_request;
host->mmc_host_ops.hs400_enhanced_strobe =
sdhci_sprd_hs400_enhanced_strobe;
+ host->mmc_host_ops.prepare_sd_hs_tuning =
+ sdhci_sprd_prepare_sd_hs_cmd_tuning;
+ host->mmc_host_ops.execute_sd_hs_tuning =
+ sdhci_sprd_execute_sd_hs_data_tuning;
+
/*
* We can not use the standard ops to change and detect the voltage
* signal for Spreadtrum SD host controller, since our voltage regulator
@@ -575,7 +764,7 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
ret = mmc_of_parse(host->mmc);
if (ret)
- goto pltfm_free;
+ return ret;
if (!mmc_card_is_removable(host->mmc))
host->mmc_host_ops.request_atomic = sdhci_sprd_request_atomic;
@@ -589,34 +778,26 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
if (!IS_ERR(sprd_host->pinctrl)) {
sprd_host->pins_uhs =
pinctrl_lookup_state(sprd_host->pinctrl, "state_uhs");
- if (IS_ERR(sprd_host->pins_uhs)) {
- ret = PTR_ERR(sprd_host->pins_uhs);
- goto pltfm_free;
- }
+ if (IS_ERR(sprd_host->pins_uhs))
+ return PTR_ERR(sprd_host->pins_uhs);
sprd_host->pins_default =
pinctrl_lookup_state(sprd_host->pinctrl, "default");
- if (IS_ERR(sprd_host->pins_default)) {
- ret = PTR_ERR(sprd_host->pins_default);
- goto pltfm_free;
- }
+ if (IS_ERR(sprd_host->pins_default))
+ return PTR_ERR(sprd_host->pins_default);
}
clk = devm_clk_get(&pdev->dev, "sdio");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto pltfm_free;
- }
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
sprd_host->clk_sdio = clk;
sprd_host->base_rate = clk_get_rate(sprd_host->clk_sdio);
if (!sprd_host->base_rate)
sprd_host->base_rate = SDHCI_SPRD_CLK_DEF_RATE;
clk = devm_clk_get(&pdev->dev, "enable");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto pltfm_free;
- }
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
sprd_host->clk_enable = clk;
clk = devm_clk_get(&pdev->dev, "2x_enable");
@@ -625,7 +806,7 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
ret = clk_prepare_enable(sprd_host->clk_sdio);
if (ret)
- goto pltfm_free;
+ return ret;
ret = clk_prepare_enable(sprd_host->clk_enable);
if (ret)
@@ -654,11 +835,14 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
* will allow these modes to be specified only by device
* tree properties through mmc_of_parse().
*/
- host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
- host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ sdhci_read_caps(host);
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
SDHCI_SUPPORT_DDR50);
+ ret = mmc_regulator_get_supply(host->mmc);
+ if (ret)
+ goto pm_runtime_disable;
+
ret = sdhci_setup_host(host);
if (ret)
goto pm_runtime_disable;
@@ -679,7 +863,6 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
if (ret)
goto err_cleanup_host;
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
@@ -699,13 +882,10 @@ clk_disable2:
clk_disable:
clk_disable_unprepare(sprd_host->clk_sdio);
-
-pltfm_free:
- sdhci_pltfm_free(pdev);
return ret;
}
-static int sdhci_sprd_remove(struct platform_device *pdev)
+static void sdhci_sprd_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
@@ -715,10 +895,6 @@ static int sdhci_sprd_remove(struct platform_device *pdev)
clk_disable_unprepare(sprd_host->clk_sdio);
clk_disable_unprepare(sprd_host->clk_enable);
clk_disable_unprepare(sprd_host->clk_2x_enable);
-
- sdhci_pltfm_free(pdev);
-
- return 0;
}
static const struct of_device_id sdhci_sprd_of_match[] = {
@@ -727,7 +903,6 @@ static const struct of_device_id sdhci_sprd_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sdhci_sprd_of_match);
-#ifdef CONFIG_PM
static int sdhci_sprd_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -774,13 +949,10 @@ clk_2x_disable:
return ret;
}
-#endif
static const struct dev_pm_ops sdhci_sprd_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(sdhci_sprd_runtime_suspend,
- sdhci_sprd_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(sdhci_sprd_runtime_suspend, sdhci_sprd_runtime_resume, NULL)
};
static struct platform_driver sdhci_sprd_driver = {
@@ -790,7 +962,7 @@ static struct platform_driver sdhci_sprd_driver = {
.name = "sdhci_sprd_r11",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_sprd_of_match,
- .pm = &sdhci_sprd_pm_ops,
+ .pm = pm_ptr(&sdhci_sprd_pm_ops),
},
};
module_platform_driver(sdhci_sprd_driver);
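The new SD high-speed tuning in the Spreadtrum change above sweeps all 256 delay codes (CMD or read-data, depending on the phase), records which ones complete a probe command, and then programs the midpoint of the longest passing run. A self-contained sketch of that selection step, slightly simplified with respect to the exact bookkeeping in sdhci_sprd_get_best_clk_sample() and using an invented helper name:

#include <stddef.h>

/*
 * Return the midpoint of the longest run of passing delay codes in
 * pass[0..n-1] (non-zero means "this delay worked"), or -1 if nothing
 * passed.  Track the current run length, remember where the longest run
 * ended, then bias the midpoint toward the end of that window.
 */
static int best_delay_sample(const unsigned char *pass, size_t n)
{
	size_t best_len = 0, best_end = 0, run = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		if (pass[i]) {
			run++;
			continue;
		}
		if (run > best_len) {	/* a failing code closes the current run */
			best_len = run;
			best_end = i - 1;
		}
		run = 0;
	}

	if (run > best_len) {		/* a passing run may extend to the last code */
		best_len = run;
		best_end = n - 1;
	}

	if (!best_len)
		return -1;

	return (int)(best_end - (best_len - 1) / 2);
}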
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
index d41582c21aa3..bf6685805137 100644
--- a/drivers/mmc/host/sdhci-st.c
+++ b/drivers/mmc/host/sdhci-st.c
@@ -348,7 +348,6 @@ static int sdhci_st_probe(struct platform_device *pdev)
struct clk *clk, *icnclk;
int ret = 0;
u16 host_version;
- struct resource *res;
struct reset_control *rstc;
clk = devm_clk_get(&pdev->dev, "mmc");
@@ -381,13 +380,13 @@ static int sdhci_st_probe(struct platform_device *pdev)
ret = mmc_of_parse(host->mmc);
if (ret) {
dev_err(&pdev->dev, "Failed mmc_of_parse\n");
- goto err_of;
+ goto err_pltfm_init;
}
ret = clk_prepare_enable(clk);
if (ret) {
dev_err(&pdev->dev, "Failed to prepare clock\n");
- goto err_of;
+ goto err_pltfm_init;
}
ret = clk_prepare_enable(icnclk);
@@ -397,9 +396,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
}
/* Configure the FlashSS Top registers for setting eMMC TX/RX delay */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "top-mmc-delay");
- pdata->top_ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ pdata->top_ioaddr = devm_platform_ioremap_resource_byname(pdev, "top-mmc-delay");
if (IS_ERR(pdata->top_ioaddr))
pdata->top_ioaddr = NULL;
@@ -426,32 +423,28 @@ err_out:
clk_disable_unprepare(icnclk);
err_icnclk:
clk_disable_unprepare(clk);
-err_of:
- sdhci_pltfm_free(pdev);
err_pltfm_init:
reset_control_assert(rstc);
return ret;
}
-static int sdhci_st_remove(struct platform_device *pdev)
+static void sdhci_st_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host);
struct reset_control *rstc = pdata->rstc;
- int ret;
+ struct clk *clk = pltfm_host->clk;
- ret = sdhci_pltfm_unregister(pdev);
+ sdhci_pltfm_remove(pdev);
clk_disable_unprepare(pdata->icnclk);
+ clk_disable_unprepare(clk);
reset_control_assert(rstc);
-
- return ret;
}
-#ifdef CONFIG_PM_SLEEP
static int sdhci_st_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -498,9 +491,8 @@ static int sdhci_st_resume(struct device *dev)
return sdhci_resume_host(host);
}
-#endif
-static SIMPLE_DEV_PM_OPS(sdhci_st_pmops, sdhci_st_suspend, sdhci_st_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sdhci_st_pmops, sdhci_st_suspend, sdhci_st_resume);
static const struct of_device_id st_sdhci_match[] = {
{ .compatible = "st,sdhci" },
@@ -515,7 +507,7 @@ static struct platform_driver sdhci_st_driver = {
.driver = {
.name = "sdhci-st",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sdhci_st_pmops,
+ .pm = pm_sleep_ptr(&sdhci_st_pmops),
.of_match_table = st_sdhci_match,
},
};
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 387ce9cdbd7c..820ce4dae58b 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -3,27 +3,33 @@
* Copyright (C) 2010 Google, Inc.
*/
+#include <linux/bitfield.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/module.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
-#include <linux/iopoll.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/regulator/consumer.h>
-#include <linux/reset.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/ktime.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
-#include <linux/gpio/consumer.h>
-#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <soc/tegra/common.h>
+
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"
@@ -90,6 +96,8 @@
#define SDHCI_TEGRA_AUTO_CAL_STATUS 0x1ec
#define SDHCI_TEGRA_AUTO_CAL_ACTIVE BIT(31)
+#define SDHCI_TEGRA_CIF2AXI_CTRL_0 0x1fc
+
#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
@@ -116,6 +124,9 @@
*/
#define NVQUIRK_HAS_TMCLK BIT(10)
+#define NVQUIRK_HAS_ANDROID_GPT_SECTOR BIT(11)
+#define NVQUIRK_PROGRAM_STREAMID BIT(12)
+
/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
#define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000
@@ -171,6 +182,7 @@ struct sdhci_tegra {
bool enable_hwcq;
unsigned long curr_clk_rate;
u8 tuned_tap_delay;
+ u32 stream_id;
};
static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
@@ -261,13 +273,9 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
bool is_tuning_cmd = 0;
bool clk_enabled;
- u8 cmd;
- if (reg == SDHCI_COMMAND) {
- cmd = SDHCI_GET_CMD(val);
- is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
- cmd == MMC_SEND_TUNING_BLOCK_HS200;
- }
+ if (reg == SDHCI_COMMAND)
+ is_tuning_cmd = mmc_op_tuning(SDHCI_GET_CMD(val));
if (is_tuning_cmd)
clk_enabled = tegra_sdhci_configure_card_clk(host, 0);
@@ -354,23 +362,6 @@ static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
}
}
-static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
- struct mmc_ios *ios)
-{
- struct sdhci_host *host = mmc_priv(mmc);
- u32 val;
-
- val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
-
- if (ios->enhanced_strobe)
- val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
- else
- val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
-
- sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
-
-}
-
static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -378,7 +369,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
u32 misc_ctrl, clk_ctrl, pad_ctrl;
- sdhci_reset(host, mask);
+ sdhci_and_cqhci_reset(host, mask);
if (!(mask & SDHCI_RESET_ALL))
return;
@@ -758,7 +749,9 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ struct device *dev = mmc_dev(host->mmc);
unsigned long host_clk;
+ int err;
if (!clock)
return sdhci_set_clock(host, clock);
@@ -776,8 +769,13 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
* from clk_get_rate() is used.
*/
host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
- clk_set_rate(pltfm_host->clk, host_clk);
- tegra_host->curr_clk_rate = host_clk;
+
+ err = dev_pm_opp_set_rate(dev, host_clk);
+ if (err)
+ dev_err(dev, "failed to set clk rate to %luHz: %d\n",
+ host_clk, err);
+
+ tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
if (tegra_host->ddr_signaling)
host->max_clk = host_clk;
else
@@ -791,6 +789,32 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
}
}
+static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 val;
+
+ val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
+
+ if (ios->enhanced_strobe) {
+ val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
+ /*
+ * When CMD13 is sent from mmc_select_hs400es() after
+ * switching to HS400ES mode, the bus is operating at
+ * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
+ * To meet Tegra SDHCI requirement at HS400ES mode, force SDHCI
+ * interface clock to MMC_HS200_MAX_DTR (200 MHz) so that host
+ * controller CAR clock and the interface clock are rate matched.
+ */
+ tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
+ } else {
+ val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
+ }
+
+ sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
+}
+
static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1361,6 +1385,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
.pdata = &sdhci_tegra20_pdata,
.dma_mask = DMA_BIT_MASK(32),
.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
+ NVQUIRK_HAS_ANDROID_GPT_SECTOR |
NVQUIRK_ENABLE_BLOCK_GAP_DET,
};
@@ -1390,6 +1415,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
NVQUIRK_ENABLE_SDR50 |
NVQUIRK_ENABLE_SDR104 |
+ NVQUIRK_HAS_ANDROID_GPT_SECTOR |
NVQUIRK_HAS_PADCALIB,
};
@@ -1422,6 +1448,7 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
.pdata = &sdhci_tegra114_pdata,
.dma_mask = DMA_BIT_MASK(32),
+ .nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
};
static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
@@ -1438,6 +1465,7 @@ static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
.pdata = &sdhci_tegra124_pdata,
.dma_mask = DMA_BIT_MASK(34),
+ .nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
};
static const struct sdhci_ops tegra210_sdhci_ops = {
@@ -1497,9 +1525,9 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
- SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER,
.ops = &tegra186_sdhci_ops,
};
@@ -1530,7 +1558,22 @@ static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
.max_tap_delay = 139,
};
+static const struct sdhci_tegra_soc_data soc_data_tegra234 = {
+ .pdata = &sdhci_tegra186_pdata,
+ .dma_mask = DMA_BIT_MASK(39),
+ .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
+ NVQUIRK_HAS_PADCALIB |
+ NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
+ NVQUIRK_ENABLE_SDR50 |
+ NVQUIRK_ENABLE_SDR104 |
+ NVQUIRK_PROGRAM_STREAMID |
+ NVQUIRK_HAS_TMCLK,
+ .min_tap_delay = 95,
+ .max_tap_delay = 111,
+};
+
static const struct of_device_id sdhci_tegra_dt_match[] = {
+ { .compatible = "nvidia,tegra234-sdhci", .data = &soc_data_tegra234 },
{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
@@ -1590,9 +1633,21 @@ cleanup:
return ret;
}
+/* Program MC streamID for DMA transfers */
+static void sdhci_tegra_program_stream_id(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+
+ if (tegra_host->soc_data->nvquirks & NVQUIRK_PROGRAM_STREAMID) {
+ tegra_sdhci_writel(host, FIELD_PREP(GENMASK(15, 8), tegra_host->stream_id) |
+ FIELD_PREP(GENMASK(7, 0), tegra_host->stream_id),
+ SDHCI_TEGRA_CIF2AXI_CTRL_0);
+ }
+}
+
static int sdhci_tegra_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
const struct sdhci_tegra_soc_data *soc_data;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
@@ -1600,10 +1655,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
struct clk *clk;
int rc;
- match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
- if (!match)
+ soc_data = of_device_get_match_data(&pdev->dev);
+ if (!soc_data)
return -EINVAL;
- soc_data = match->data;
host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
if (IS_ERR(host))
@@ -1616,6 +1670,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
tegra_host->pad_control_available = false;
tegra_host->soc_data = soc_data;
+ if (soc_data->nvquirks & NVQUIRK_HAS_ANDROID_GPT_SECTOR)
+ host->mmc->caps2 |= MMC_CAP2_ALT_GPT_TEGRA;
+
if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
if (rc == 0)
@@ -1636,7 +1693,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
rc = mmc_of_parse(host->mmc);
if (rc)
- goto err_parse_dt;
+ return rc;
if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
host->mmc->caps |= MMC_CAP_1_8V_DDR;
@@ -1644,8 +1701,17 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
/* HW busy detection is supported, but R1B responses are required. */
host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
+ /* GPIO CD can be set as a wakeup source */
+ host->mmc->caps |= MMC_CAP_CD_WAKE;
+
tegra_sdhci_parse_dt(host);
+ if (tegra_host->soc_data->nvquirks & NVQUIRK_PROGRAM_STREAMID &&
+ !tegra_dev_iommu_get_stream_id(&pdev->dev, &tegra_host->stream_id)) {
+ dev_warn(mmc_dev(host->mmc), "missing IOMMU stream ID\n");
+ tegra_host->stream_id = 0x7f;
+ }
+
tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
GPIOD_OUT_HIGH);
if (IS_ERR(tegra_host->power_gpio)) {
@@ -1673,7 +1739,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
if (IS_ERR(clk)) {
rc = PTR_ERR(clk);
if (rc == -EPROBE_DEFER)
- goto err_power_req;
+ return rc;
dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
clk = NULL;
@@ -1684,7 +1750,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
if (rc) {
dev_err(&pdev->dev,
"failed to enable tmclk: %d\n", rc);
- goto err_power_req;
+ return rc;
}
tegra_host->tmclk = clk;
@@ -1696,7 +1762,6 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
"failed to get clock\n");
goto err_clk_get;
}
- clk_prepare_enable(clk);
pltfm_host->clk = clk;
tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
@@ -1707,15 +1772,24 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
goto err_rst_get;
}
- rc = reset_control_assert(tegra_host->rst);
+ rc = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (rc)
goto err_rst_get;
+ pm_runtime_enable(&pdev->dev);
+ rc = pm_runtime_resume_and_get(&pdev->dev);
+ if (rc)
+ goto err_pm_get;
+
+ rc = reset_control_assert(tegra_host->rst);
+ if (rc)
+ goto err_rst_assert;
+
usleep_range(2000, 4000);
rc = reset_control_deassert(tegra_host->rst);
if (rc)
- goto err_rst_get;
+ goto err_rst_assert;
usleep_range(2000, 4000);
@@ -1723,21 +1797,24 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
if (rc)
goto err_add_host;
+ sdhci_tegra_program_stream_id(host);
+
return 0;
err_add_host:
reset_control_assert(tegra_host->rst);
+err_rst_assert:
+ pm_runtime_put_sync_suspend(&pdev->dev);
+err_pm_get:
+ pm_runtime_disable(&pdev->dev);
err_rst_get:
- clk_disable_unprepare(pltfm_host->clk);
err_clk_get:
clk_disable_unprepare(tegra_host->tmclk);
err_power_req:
-err_parse_dt:
- sdhci_pltfm_free(pdev);
return rc;
}
-static int sdhci_tegra_remove(struct platform_device *pdev)
+static void sdhci_tegra_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1747,19 +1824,34 @@ static int sdhci_tegra_remove(struct platform_device *pdev)
reset_control_assert(tegra_host->rst);
usleep_range(2000, 4000);
- clk_disable_unprepare(pltfm_host->clk);
+
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
+
clk_disable_unprepare(tegra_host->tmclk);
+}
+
+static int sdhci_tegra_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- sdhci_pltfm_free(pdev);
+ clk_disable_unprepare(pltfm_host->clk);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
+static int sdhci_tegra_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_prepare_enable(pltfm_host->clk);
+}
+
+static int sdhci_tegra_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
int ret;
if (host->mmc->caps2 & MMC_CAP2_CQE) {
@@ -1774,20 +1866,31 @@ static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
return ret;
}
- clk_disable_unprepare(pltfm_host->clk);
- return 0;
+ ret = pm_runtime_force_suspend(dev);
+ if (ret) {
+ sdhci_resume_host(host);
+ cqhci_resume(host->mmc);
+ return ret;
+ }
+
+ return mmc_gpio_set_cd_wake(host->mmc, true);
}
-static int __maybe_unused sdhci_tegra_resume(struct device *dev)
+static int sdhci_tegra_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
int ret;
- ret = clk_prepare_enable(pltfm_host->clk);
+ ret = mmc_gpio_set_cd_wake(host->mmc, false);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
+ sdhci_tegra_program_stream_id(host);
+
ret = sdhci_resume_host(host);
if (ret)
goto disable_clk;
@@ -1803,20 +1906,21 @@ static int __maybe_unused sdhci_tegra_resume(struct device *dev)
suspend_host:
sdhci_suspend_host(host);
disable_clk:
- clk_disable_unprepare(pltfm_host->clk);
+ pm_runtime_force_suspend(dev);
return ret;
}
-#endif
-static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
- sdhci_tegra_resume);
+static const struct dev_pm_ops sdhci_tegra_dev_pm_ops = {
+ RUNTIME_PM_OPS(sdhci_tegra_runtime_suspend, sdhci_tegra_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(sdhci_tegra_suspend, sdhci_tegra_resume)
+};
static struct platform_driver sdhci_tegra_driver = {
.driver = {
.name = "sdhci-tegra",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_tegra_dt_match,
- .pm = &sdhci_tegra_dev_pm_ops,
+ .pm = pm_ptr(&sdhci_tegra_dev_pm_ops),
},
.probe = sdhci_tegra_probe,
.remove = sdhci_tegra_remove,
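For Tegra234 the patch adds NVQUIRK_PROGRAM_STREAMID and writes the IOMMU stream ID into both byte-wide fields of the vendor CIF2AXI control register, packing the fields with FIELD_PREP() over GENMASK() ranges. A minimal illustration of that packing; the _HI/_LO field names are invented here for readability, the patch itself uses the raw GENMASK(15, 8) and GENMASK(7, 0) masks:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define CIF2AXI_STREAMID_HI	GENMASK(15, 8)	/* upper stream ID field (illustrative name) */
#define CIF2AXI_STREAMID_LO	GENMASK(7, 0)	/* lower stream ID field (illustrative name) */

/*
 * FIELD_PREP() shifts the value into the bit range described by each mask,
 * so the same stream ID ends up in bits 15:8 and bits 7:0 of the word that
 * the driver writes to SDHCI_TEGRA_CIF2AXI_CTRL_0 (0x1fc).
 */
static u32 pack_stream_id(u32 stream_id)
{
	return FIELD_PREP(CIF2AXI_STREAMID_HI, stream_id) |
	       FIELD_PREP(CIF2AXI_STREAMID_LO, stream_id);
}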
diff --git a/drivers/mmc/host/sdhci-uhs2.c b/drivers/mmc/host/sdhci-uhs2.c
new file mode 100644
index 000000000000..c459a08d01da
--- /dev/null
+++ b/drivers/mmc/host/sdhci-uhs2.c
@@ -0,0 +1,1251 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linux/drivers/mmc/host/sdhci_uhs2.c - Secure Digital Host Controller
+ * Interface driver
+ *
+ * Copyright (C) 2014 Intel Corp, All Rights Reserved.
+ * Copyright (C) 2020 Genesys Logic, Inc.
+ * Authors: Ben Chuang <ben.chuang@genesyslogic.com.tw>
+ * Copyright (C) 2020 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/bitfield.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+
+#include "sdhci.h"
+#include "sdhci-uhs2.h"
+
+#define DRIVER_NAME "sdhci_uhs2"
+#define DBG(f, x...) \
+ pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)
+#define SDHCI_UHS2_DUMP(f, x...) \
+ pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
+
+#define UHS2_RESET_TIMEOUT_100MS 100000
+#define UHS2_CHECK_DORMANT_TIMEOUT_100MS 100000
+#define UHS2_INTERFACE_DETECT_TIMEOUT_100MS 100000
+#define UHS2_LANE_SYNC_TIMEOUT_150MS 150000
+
+#define UHS2_ARG_IOADR_MASK 0xfff
+
+void sdhci_uhs2_dump_regs(struct sdhci_host *host)
+{
+ if (!(mmc_card_uhs2(host->mmc)))
+ return;
+
+ SDHCI_UHS2_DUMP("==================== UHS2 ==================\n");
+ SDHCI_UHS2_DUMP("Blk Size: 0x%08x | Blk Cnt: 0x%08x\n",
+ sdhci_readw(host, SDHCI_UHS2_BLOCK_SIZE),
+ sdhci_readl(host, SDHCI_UHS2_BLOCK_COUNT));
+ SDHCI_UHS2_DUMP("Cmd: 0x%08x | Trn mode: 0x%08x\n",
+ sdhci_readw(host, SDHCI_UHS2_CMD),
+ sdhci_readw(host, SDHCI_UHS2_TRANS_MODE));
+ SDHCI_UHS2_DUMP("Int Stat: 0x%08x | Dev Sel : 0x%08x\n",
+ sdhci_readw(host, SDHCI_UHS2_DEV_INT_STATUS),
+ sdhci_readb(host, SDHCI_UHS2_DEV_SELECT));
+ SDHCI_UHS2_DUMP("Dev Int Code: 0x%08x\n",
+ sdhci_readb(host, SDHCI_UHS2_DEV_INT_CODE));
+ SDHCI_UHS2_DUMP("Reset: 0x%08x | Timer: 0x%08x\n",
+ sdhci_readw(host, SDHCI_UHS2_SW_RESET),
+ sdhci_readw(host, SDHCI_UHS2_TIMER_CTRL));
+ SDHCI_UHS2_DUMP("ErrInt: 0x%08x | ErrIntEn: 0x%08x\n",
+ sdhci_readl(host, SDHCI_UHS2_INT_STATUS),
+ sdhci_readl(host, SDHCI_UHS2_INT_STATUS_ENABLE));
+ SDHCI_UHS2_DUMP("ErrSigEn: 0x%08x\n",
+ sdhci_readl(host, SDHCI_UHS2_INT_SIGNAL_ENABLE));
+}
+EXPORT_SYMBOL_GPL(sdhci_uhs2_dump_regs);
+
+/*****************************************************************************\
+ * *
+ * Low level functions *
+ * *
+\*****************************************************************************/
+
+static inline u16 uhs2_dev_cmd(struct mmc_command *cmd)
+{
+ return be16_to_cpu((__force __be16)cmd->uhs2_cmd->arg) & UHS2_ARG_IOADR_MASK;
+}
+
+static inline int mmc_opt_regulator_set_ocr(struct mmc_host *mmc,
+ struct regulator *supply,
+ unsigned short vdd_bit)
+{
+ return IS_ERR_OR_NULL(supply) ? 0 : mmc_regulator_set_ocr(mmc, supply, vdd_bit);
+}
+
+/**
+ * sdhci_uhs2_reset - invoke SW reset
+ * @host: SDHCI host
+ * @mask: Control mask
+ *
+ * Invoke SW reset, depending on a bit in @mask and wait for completion.
+ */
+void sdhci_uhs2_reset(struct sdhci_host *host, u16 mask)
+{
+ u32 val;
+
+ sdhci_writew(host, mask, SDHCI_UHS2_SW_RESET);
+
+ if (mask & SDHCI_UHS2_SW_RESET_FULL)
+ host->clock = 0;
+
+ /* hw clears the bit when it's done */
+ if (read_poll_timeout_atomic(sdhci_readw, val, !(val & mask), 10,
+ UHS2_RESET_TIMEOUT_100MS, true, host, SDHCI_UHS2_SW_RESET)) {
+ pr_debug("%s: %s: Reset 0x%x never completed. %s: clean reset bit.\n", __func__,
+ mmc_hostname(host->mmc), (int)mask, mmc_hostname(host->mmc));
+ sdhci_writeb(host, 0, SDHCI_UHS2_SW_RESET);
+ return;
+ }
+}
+EXPORT_SYMBOL_GPL(sdhci_uhs2_reset);
+
+static void sdhci_uhs2_reset_cmd_data(struct sdhci_host *host)
+{
+ sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+ if (host->mmc->uhs2_sd_tran) {
+ sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD);
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_ALL_MASK, SDHCI_UHS2_INT_ERROR_MASK);
+ }
+}
+
+void sdhci_uhs2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+ u8 pwr = 0;
+
+ if (mode != MMC_POWER_OFF) {
+ pwr = sdhci_get_vdd_value(vdd);
+ if (!pwr)
+ WARN(1, "%s: Invalid vdd %#x\n",
+ mmc_hostname(host->mmc), vdd);
+ pwr |= SDHCI_VDD2_POWER_180;
+ }
+
+ if (host->pwr == pwr)
+ return;
+ host->pwr = pwr;
+
+ if (pwr == 0) {
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
+ mmc_opt_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ mmc_regulator_set_vqmmc2(mmc, &mmc->ios);
+ } else {
+ mmc_opt_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ /* support 1.8v only for now */
+ mmc_regulator_set_vqmmc2(mmc, &mmc->ios);
+
+ /* Clear the power reg before setting a new value */
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
+ /* vdd first */
+ pwr |= SDHCI_POWER_ON;
+ sdhci_writeb(host, pwr & 0xf, SDHCI_POWER_CONTROL);
+ mdelay(5);
+
+ pwr |= SDHCI_VDD2_POWER_ON;
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ mdelay(5);
+ }
+}
+EXPORT_SYMBOL_GPL(sdhci_uhs2_set_power);
+
+static u8 sdhci_calc_timeout_uhs2(struct sdhci_host *host, u8 *cmd_res, u8 *dead_lock)
+{
+ /* timeout in us */
+ unsigned int dead_lock_timeout = 1 * 1000 * 1000;
+ unsigned int cmd_res_timeout = 5 * 1000;
+ unsigned int current_timeout;
+ u8 count;
+
+ /*
+ * Figure out needed cycles.
+ * We do this in steps in order to fit inside a 32 bit int.
+ * The first step is the minimum timeout, which will have a
+ * minimum resolution of 6 bits:
+ * (1) 2^13*1000 > 2^22,
+ * (2) host->timeout_clk < 2^16
+ * =>
+ * (1) / (2) > 2^6
+ */
+ count = 0;
+ current_timeout = (1 << 13) * 1000 / host->timeout_clk;
+ while (current_timeout < cmd_res_timeout) {
+ count++;
+ current_timeout <<= 1;
+ if (count >= 0xF)
+ break;
+ }
+
+ if (count >= 0xF) {
+ DBG("%s: Too large timeout 0x%x requested for CMD_RES!\n",
+ mmc_hostname(host->mmc), count);
+ count = 0xE;
+ }
+ *cmd_res = count;
+
+ count = 0;
+ current_timeout = (1 << 13) * 1000 / host->timeout_clk;
+ while (current_timeout < dead_lock_timeout) {
+ count++;
+ current_timeout <<= 1;
+ if (count >= 0xF)
+ break;
+ }
+
+ if (count >= 0xF) {
+ DBG("%s: Too large timeout 0x%x requested for DEADLOCK!\n",
+ mmc_hostname(host->mmc), count);
+ count = 0xE;
+ }
+ *dead_lock = count;
+
+ return count;
+}
+
+static void __sdhci_uhs2_set_timeout(struct sdhci_host *host)
+{
+ u8 cmd_res, dead_lock;
+
+ sdhci_calc_timeout_uhs2(host, &cmd_res, &dead_lock);
+ cmd_res |= FIELD_PREP(SDHCI_UHS2_TIMER_CTRL_DEADLOCK_MASK, dead_lock);
+ sdhci_writeb(host, cmd_res, SDHCI_UHS2_TIMER_CTRL);
+}
+
+void sdhci_uhs2_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ __sdhci_set_timeout(host, cmd);
+
+ if (mmc_card_uhs2(host->mmc))
+ __sdhci_uhs2_set_timeout(host);
+}
+EXPORT_SYMBOL_GPL(sdhci_uhs2_set_timeout);
+
+/**
+ * sdhci_uhs2_clear_set_irqs - set Error Interrupt Status Enable register
+ * @host: SDHCI host
+ * @clear: bit-wise clear mask
+ * @set: bit-wise set mask
+ *
+ * Set/unset bits in UHS-II Error Interrupt Status Enable register
+ */
+void sdhci_uhs2_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
+{
+ u32 ier;
+
+ ier = sdhci_readl(host, SDHCI_UHS2_INT_STATUS_ENABLE);
+ ier &= ~clear;
+ ier |= set;
+ sdhci_writel(host, ier, SDHCI_UHS2_INT_STATUS_ENABLE);
+ sdhci_writel(host, ier, SDHCI_UHS2_INT_SIGNAL_ENABLE);
+}
+EXPORT_SYMBOL_GPL(sdhci_uhs2_clear_set_irqs);
+
+static void __sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u8 cmd_res, dead_lock;
+ u16 ctrl_2;
+
+ /* UHS2 Timeout Control */
+ sdhci_calc_timeout_uhs2(host, &cmd_res, &dead_lock);
+
+ /* change to use calculate value */
+ cmd_res |= FIELD_PREP(SDHCI_UHS2_TIMER_CTRL_DEADLOCK_MASK, dead_lock);
+
+ sdhci_uhs2_clear_set_irqs(host,
+ SDHCI_UHS2_INT_CMD_TIMEOUT |
+ SDHCI_UHS2_INT_DEADLOCK_TIMEOUT,
+ 0);
+ sdhci_writeb(host, cmd_res, SDHCI_UHS2_TIMER_CTRL);
+ sdhci_uhs2_clear_set_irqs(host, 0,
+ SDHCI_UHS2_INT_CMD_TIMEOUT |
+ SDHCI_UHS2_INT_DEADLOCK_TIMEOUT);
+
+ /* UHS2 timing. Note, UHS2 timing is disabled when powering off */
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (ios->power_mode != MMC_POWER_OFF &&
+ (ios->timing == MMC_TIMING_UHS2_SPEED_A ||
+ ios->timing == MMC_TIMING_UHS2_SPEED_A_HD ||
+ ios->timing == MMC_TIMING_UHS2_SPEED_B ||
+ ios->timing == MMC_TIMING_UHS2_SPEED_B_HD))
+ ctrl_2 |= SDHCI_CTRL_UHS2 | SDHCI_CTRL_UHS2_ENABLE;
+ else
+ ctrl_2 &= ~(SDHCI_CTRL_UHS2 | SDHCI_CTRL_UHS2_ENABLE);
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+ host->timing = ios->timing;
+
+ if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
+ sdhci_enable_preset_value(host, true);
+
+ if (host->ops->set_power)
+ host->ops->set_power(host, ios->power_mode, ios->vdd);
+ else
+ sdhci_uhs2_set_power(host, ios->power_mode, ios->vdd);
+
+ host->ops->set_clock(host, ios->clock);
+ host->clock = ios->clock;
+}
+
+static int sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ pr_debug("%s: clock %uHz powermode %u Vdd %u timing %u\n",
+ mmc_hostname(mmc), ios->clock, ios->power_mode, ios->vdd, ios->timing);
+
+ if (!mmc_card_uhs2(mmc)) {
+ sdhci_set_ios(mmc, ios);
+ return 0;
+ }
+
+ if (ios->power_mode == MMC_POWER_UNDEFINED)
+ return 0;
+
+ if (host->flags & SDHCI_DEVICE_DEAD) {
+ if (ios->power_mode == MMC_POWER_OFF) {
+ mmc_opt_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ mmc_regulator_set_vqmmc2(mmc, ios);
+ }
+ return -1;
+ }
+
+ sdhci_set_ios_common(mmc, ios);
+
+ __sdhci_uhs2_set_ios(mmc, ios);
+
+ return 0;
+}
+
+static int sdhci_uhs2_interface_detect(struct sdhci_host *host)
+{
+ u32 val;
+
+ if (read_poll_timeout(sdhci_readl, val, (val & SDHCI_UHS2_IF_DETECT),
+ 100, UHS2_INTERFACE_DETECT_TIMEOUT_100MS, true,
+ host, SDHCI_PRESENT_STATE)) {
+ pr_debug("%s: not detect UHS2 interface in 100ms.\n", mmc_hostname(host->mmc));
+ sdhci_dbg_dumpregs(host, "UHS2 interface detect timeout in 100ms");
+ return -EIO;
+ }
+
+ /* Enable UHS2 error interrupts */
+ sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_ALL_MASK, SDHCI_UHS2_INT_ERROR_MASK);
+
+ if (read_poll_timeout(sdhci_readl, val, (val & SDHCI_UHS2_LANE_SYNC),
+ 100, UHS2_LANE_SYNC_TIMEOUT_150MS, true, host, SDHCI_PRESENT_STATE)) {
+ pr_debug("%s: UHS2 Lane sync fail in 150ms.\n", mmc_hostname(host->mmc));
+ sdhci_dbg_dumpregs(host, "UHS2 Lane sync fail in 150ms");
+ return -EIO;
+ }
+
+ DBG("%s: UHS2 Lane synchronized in UHS2 mode, PHY is initialized.\n",
+ mmc_hostname(host->mmc));
+ return 0;
+}
+
+static int sdhci_uhs2_init(struct sdhci_host *host)
+{
+ u16 caps_ptr = 0;
+ u32 caps_gen = 0;
+ u32 caps_phy = 0;
+ u32 caps_tran[2] = {0, 0};
+ struct mmc_host *mmc = host->mmc;
+
+ caps_ptr = sdhci_readw(host, SDHCI_UHS2_CAPS_PTR);
+ if (caps_ptr < 0x100 || caps_ptr > 0x1FF) {
+ pr_err("%s: SDHCI_UHS2_CAPS_PTR(%d) is wrong.\n",
+ mmc_hostname(mmc), caps_ptr);
+ return -ENODEV;
+ }
+ caps_gen = sdhci_readl(host, caps_ptr + SDHCI_UHS2_CAPS_OFFSET);
+ caps_phy = sdhci_readl(host, caps_ptr + SDHCI_UHS2_CAPS_PHY_OFFSET);
+ caps_tran[0] = sdhci_readl(host, caps_ptr + SDHCI_UHS2_CAPS_TRAN_OFFSET);
+ caps_tran[1] = sdhci_readl(host, caps_ptr + SDHCI_UHS2_CAPS_TRAN_1_OFFSET);
+
+ /* General Caps */
+ mmc->uhs2_caps.dap = caps_gen & SDHCI_UHS2_CAPS_DAP_MASK;
+ mmc->uhs2_caps.gap = FIELD_GET(SDHCI_UHS2_CAPS_GAP_MASK, caps_gen);
+ mmc->uhs2_caps.n_lanes = FIELD_GET(SDHCI_UHS2_CAPS_LANE_MASK, caps_gen);
+ mmc->uhs2_caps.addr64 = (caps_gen & SDHCI_UHS2_CAPS_ADDR_64) ? 1 : 0;
+ mmc->uhs2_caps.card_type = FIELD_GET(SDHCI_UHS2_CAPS_DEV_TYPE_MASK, caps_gen);
+
+ /* PHY Caps */
+ mmc->uhs2_caps.phy_rev = caps_phy & SDHCI_UHS2_CAPS_PHY_REV_MASK;
+ mmc->uhs2_caps.speed_range = FIELD_GET(SDHCI_UHS2_CAPS_PHY_RANGE_MASK, caps_phy);
+ mmc->uhs2_caps.n_lss_sync = FIELD_GET(SDHCI_UHS2_CAPS_PHY_N_LSS_SYN_MASK, caps_phy);
+ mmc->uhs2_caps.n_lss_dir = FIELD_GET(SDHCI_UHS2_CAPS_PHY_N_LSS_DIR_MASK, caps_phy);
+ if (mmc->uhs2_caps.n_lss_sync == 0)
+ mmc->uhs2_caps.n_lss_sync = 16 << 2;
+ else
+ mmc->uhs2_caps.n_lss_sync <<= 2;
+ if (mmc->uhs2_caps.n_lss_dir == 0)
+ mmc->uhs2_caps.n_lss_dir = 16 << 3;
+ else
+ mmc->uhs2_caps.n_lss_dir <<= 3;
+
+ /* LINK/TRAN Caps */
+ mmc->uhs2_caps.link_rev = caps_tran[0] & SDHCI_UHS2_CAPS_TRAN_LINK_REV_MASK;
+ mmc->uhs2_caps.n_fcu = FIELD_GET(SDHCI_UHS2_CAPS_TRAN_N_FCU_MASK, caps_tran[0]);
+ if (mmc->uhs2_caps.n_fcu == 0)
+ mmc->uhs2_caps.n_fcu = 256;
+ mmc->uhs2_caps.host_type = FIELD_GET(SDHCI_UHS2_CAPS_TRAN_HOST_TYPE_MASK, caps_tran[0]);
+ mmc->uhs2_caps.maxblk_len = FIELD_GET(SDHCI_UHS2_CAPS_TRAN_BLK_LEN_MASK, caps_tran[0]);
+ mmc->uhs2_caps.n_data_gap = caps_tran[1] & SDHCI_UHS2_CAPS_TRAN_1_N_DATA_GAP_MASK;
+
+ return 0;
+}
+
+static int sdhci_uhs2_do_detect_init(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ DBG("Begin do uhs2 detect init.\n");
+
+ if (host->ops->uhs2_pre_detect_init)
+ host->ops->uhs2_pre_detect_init(host);
+
+ if (sdhci_uhs2_interface_detect(host)) {
+ pr_debug("%s: cannot detect UHS2 interface.\n", mmc_hostname(host->mmc));
+ return -EIO;
+ }
+
+ if (sdhci_uhs2_init(host)) {
+ pr_debug("%s: UHS2 init fail.\n", mmc_hostname(host->mmc));
+ return -EIO;
+ }
+
+ /* Init complete, do soft reset and enable UHS2 error irqs. */
+ sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD);
+ sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_ALL_MASK, SDHCI_UHS2_INT_ERROR_MASK);
+ /*
+	 * N.B. SDHCI_INT_ENABLE and SDHCI_SIGNAL_ENABLE were cleared
+	 * by SDHCI_UHS2_SW_RESET_SD
+ */
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+ return 0;
+}
+
+static int sdhci_uhs2_disable_clk(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u16 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+
+ clk &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ return 0;
+}
+
+static int sdhci_uhs2_enable_clk(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u16 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ int timeout_us = 20000; /* 20ms */
+ u32 val;
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ if (read_poll_timeout(sdhci_readw, val, (val & SDHCI_CLOCK_INT_STABLE),
+ 10, timeout_us, true, host, SDHCI_CLOCK_CONTROL)) {
+ pr_err("%s: Internal clock never stabilised.\n", mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ return -EIO;
+ }
+ return 0;
+}
+
+static void sdhci_uhs2_set_config(struct sdhci_host *host)
+{
+ u32 value;
+ u16 sdhci_uhs2_set_ptr = sdhci_readw(host, SDHCI_UHS2_SETTINGS_PTR);
+ u16 sdhci_uhs2_gen_set_reg = sdhci_uhs2_set_ptr;
+ u16 sdhci_uhs2_phy_set_reg = sdhci_uhs2_set_ptr + 4;
+ u16 sdhci_uhs2_tran_set_reg = sdhci_uhs2_set_ptr + 8;
+ u16 sdhci_uhs2_tran_set_1_reg = sdhci_uhs2_set_ptr + 12;
+
+ /* Set Gen Settings */
+ value = FIELD_PREP(SDHCI_UHS2_GEN_SETTINGS_N_LANES_MASK, host->mmc->uhs2_caps.n_lanes_set);
+ sdhci_writel(host, value, sdhci_uhs2_gen_set_reg);
+
+ /* Set PHY Settings */
+ value = FIELD_PREP(SDHCI_UHS2_PHY_N_LSS_DIR_MASK, host->mmc->uhs2_caps.n_lss_dir_set) |
+ FIELD_PREP(SDHCI_UHS2_PHY_N_LSS_SYN_MASK, host->mmc->uhs2_caps.n_lss_sync_set);
+ if (host->mmc->ios.timing == MMC_TIMING_UHS2_SPEED_B ||
+ host->mmc->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD)
+ value |= SDHCI_UHS2_PHY_SET_SPEED_B;
+ sdhci_writel(host, value, sdhci_uhs2_phy_set_reg);
+
+ /* Set LINK-TRAN Settings */
+ value = FIELD_PREP(SDHCI_UHS2_TRAN_RETRY_CNT_MASK, host->mmc->uhs2_caps.max_retry_set) |
+ FIELD_PREP(SDHCI_UHS2_TRAN_N_FCU_MASK, host->mmc->uhs2_caps.n_fcu_set);
+ sdhci_writel(host, value, sdhci_uhs2_tran_set_reg);
+ sdhci_writel(host, host->mmc->uhs2_caps.n_data_gap_set, sdhci_uhs2_tran_set_1_reg);
+}
+
+static int sdhci_uhs2_check_dormant(struct sdhci_host *host)
+{
+ u32 val;
+
+ if (read_poll_timeout(sdhci_readl, val, (val & SDHCI_UHS2_IN_DORMANT_STATE),
+ 100, UHS2_CHECK_DORMANT_TIMEOUT_100MS, true, host,
+ SDHCI_PRESENT_STATE)) {
+ pr_debug("%s: UHS2 IN_DORMANT fail in 100ms.\n", mmc_hostname(host->mmc));
+ sdhci_dbg_dumpregs(host, "UHS2 IN_DORMANT fail in 100ms");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int sdhci_uhs2_control(struct mmc_host *mmc, enum sd_uhs2_operation op)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct mmc_ios *ios = &mmc->ios;
+ int err = 0;
+
+ DBG("Begin uhs2 control, act %d.\n", op);
+
+ switch (op) {
+ case UHS2_PHY_INIT:
+ err = sdhci_uhs2_do_detect_init(mmc);
+ break;
+ case UHS2_SET_CONFIG:
+ sdhci_uhs2_set_config(host);
+ break;
+ case UHS2_ENABLE_INT:
+ sdhci_uhs2_clear_set_irqs(host, 0, SDHCI_INT_CARD_INT);
+ break;
+ case UHS2_DISABLE_INT:
+ sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_CARD_INT, 0);
+ break;
+ case UHS2_CHECK_DORMANT:
+ err = sdhci_uhs2_check_dormant(host);
+ break;
+ case UHS2_DISABLE_CLK:
+ err = sdhci_uhs2_disable_clk(mmc);
+ break;
+ case UHS2_ENABLE_CLK:
+ err = sdhci_uhs2_enable_clk(mmc);
+ break;
+ case UHS2_SET_IOS:
+ err = sdhci_uhs2_set_ios(mmc, ios);
+ break;
+ default:
+ pr_err("%s: input sd uhs2 operation %d is wrong!\n",
+ mmc_hostname(host->mmc), op);
+ err = -EIO;
+ break;
+ }
+
+ return err;
+}
+
+/*****************************************************************************\
+ * *
+ * Core functions *
+ * *
+\*****************************************************************************/
+
+static void sdhci_uhs2_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ struct mmc_data *data = cmd->data;
+
+ sdhci_initialize_data(host, data);
+
+ sdhci_prepare_dma(host, data);
+
+ sdhci_writew(host, data->blksz, SDHCI_UHS2_BLOCK_SIZE);
+ sdhci_writew(host, data->blocks, SDHCI_UHS2_BLOCK_COUNT);
+}
+
+static void sdhci_uhs2_finish_data(struct sdhci_host *host)
+{
+ struct mmc_data *data = host->data;
+
+ __sdhci_finish_data_common(host, true);
+
+ __sdhci_finish_mrq(host, data->mrq);
+}
+
+static void sdhci_uhs2_set_transfer_mode(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ u16 mode;
+ struct mmc_data *data = cmd->data;
+
+ if (!data) {
+ /* clear Auto CMD settings for no data CMDs */
+ if (uhs2_dev_cmd(cmd) == UHS2_DEV_CMD_TRANS_ABORT) {
+ mode = 0;
+ } else {
+ mode = sdhci_readw(host, SDHCI_UHS2_TRANS_MODE);
+ if (cmd->opcode == MMC_STOP_TRANSMISSION || cmd->opcode == MMC_ERASE)
+ mode |= SDHCI_UHS2_TRNS_WAIT_EBSY;
+ else
+ /* send status mode */
+ if (cmd->opcode == MMC_SEND_STATUS)
+ mode = 0;
+ }
+
+ DBG("UHS2 no data trans mode is 0x%x.\n", mode);
+
+ sdhci_writew(host, mode, SDHCI_UHS2_TRANS_MODE);
+ return;
+ }
+
+ WARN_ON(!host->data);
+
+ mode = SDHCI_UHS2_TRNS_BLK_CNT_EN | SDHCI_UHS2_TRNS_WAIT_EBSY;
+ if (data->flags & MMC_DATA_WRITE)
+ mode |= SDHCI_UHS2_TRNS_DATA_TRNS_WRT;
+
+ if (data->blocks == 1 &&
+ data->blksz != 512 &&
+ cmd->opcode != MMC_READ_SINGLE_BLOCK &&
+ cmd->opcode != MMC_WRITE_BLOCK) {
+ mode &= ~SDHCI_UHS2_TRNS_BLK_CNT_EN;
+ mode |= SDHCI_UHS2_TRNS_BLK_BYTE_MODE;
+ }
+
+ if (host->flags & SDHCI_REQ_USE_DMA)
+ mode |= SDHCI_UHS2_TRNS_DMA;
+
+ if (cmd->uhs2_cmd->tmode_half_duplex)
+ mode |= SDHCI_UHS2_TRNS_2L_HD;
+
+ sdhci_writew(host, mode, SDHCI_UHS2_TRANS_MODE);
+
+ DBG("UHS2 trans mode is 0x%x.\n", mode);
+}
+
+static void __sdhci_uhs2_send_command(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ int i, j;
+ int cmd_reg;
+
+ i = 0;
+ sdhci_writel(host,
+ ((u32)cmd->uhs2_cmd->arg << 16) |
+ (u32)cmd->uhs2_cmd->header,
+ SDHCI_UHS2_CMD_PACKET + i);
+ i += 4;
+
+ /*
+ * Per spec, payload (config) should be MSB before sending out.
+	 * But we don't need to convert here because the payload was
+	 * already stored MSB-first when the config read/write commands were prepared.
+ */
+ for (j = 0; j < cmd->uhs2_cmd->payload_len / sizeof(u32); j++) {
+ sdhci_writel(host, *(__force u32 *)(cmd->uhs2_cmd->payload + j),
+ SDHCI_UHS2_CMD_PACKET + i);
+ i += 4;
+ }
+
+ for ( ; i < SDHCI_UHS2_CMD_PACK_MAX_LEN; i += 4)
+ sdhci_writel(host, 0, SDHCI_UHS2_CMD_PACKET + i);
+
+ DBG("UHS2 CMD packet_len = %d.\n", cmd->uhs2_cmd->packet_len);
+ for (i = 0; i < cmd->uhs2_cmd->packet_len; i++)
+ DBG("UHS2 CMD_PACKET[%d] = 0x%x.\n", i,
+ sdhci_readb(host, SDHCI_UHS2_CMD_PACKET + i));
+
+ cmd_reg = FIELD_PREP(SDHCI_UHS2_CMD_PACK_LEN_MASK, cmd->uhs2_cmd->packet_len);
+ if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC)
+ cmd_reg |= SDHCI_UHS2_CMD_DATA;
+ if (cmd->opcode == MMC_STOP_TRANSMISSION)
+ cmd_reg |= SDHCI_UHS2_CMD_CMD12;
+
+ /* UHS2 Native ABORT */
+ if ((cmd->uhs2_cmd->header & UHS2_NATIVE_PACKET) &&
+ (uhs2_dev_cmd(cmd) == UHS2_DEV_CMD_TRANS_ABORT))
+ cmd_reg |= SDHCI_UHS2_CMD_TRNS_ABORT;
+
+ /* UHS2 Native DORMANT */
+ if ((cmd->uhs2_cmd->header & UHS2_NATIVE_PACKET) &&
+ (uhs2_dev_cmd(cmd) == UHS2_DEV_CMD_GO_DORMANT_STATE))
+ cmd_reg |= SDHCI_UHS2_CMD_DORMANT;
+
+ DBG("0x%x is set to UHS2 CMD register.\n", cmd_reg);
+
+ sdhci_writew(host, cmd_reg, SDHCI_UHS2_CMD);
+}
+
+static bool sdhci_uhs2_send_command(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ u32 mask;
+ unsigned long timeout;
+
+ WARN_ON(host->cmd);
+
+ /* Initially, a command has no error */
+ cmd->error = 0;
+
+ if (cmd->opcode == MMC_STOP_TRANSMISSION)
+ cmd->flags |= MMC_RSP_BUSY;
+
+ mask = SDHCI_CMD_INHIBIT;
+
+ if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
+ return false;
+
+ host->cmd = cmd;
+ host->data_timeout = 0;
+ if (sdhci_data_line_cmd(cmd)) {
+ WARN_ON(host->data_cmd);
+ host->data_cmd = cmd;
+ __sdhci_uhs2_set_timeout(host);
+ }
+
+ if (cmd->data)
+ sdhci_uhs2_prepare_data(host, cmd);
+
+ sdhci_uhs2_set_transfer_mode(host, cmd);
+
+ timeout = jiffies;
+ if (host->data_timeout)
+ timeout += nsecs_to_jiffies(host->data_timeout);
+ else if (!cmd->data && cmd->busy_timeout > 9000)
+ timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
+ else
+ timeout += 10 * HZ;
+ sdhci_mod_timer(host, cmd->mrq, timeout);
+
+ __sdhci_uhs2_send_command(host, cmd);
+
+ return true;
+}
+
+static bool sdhci_uhs2_send_command_retry(struct sdhci_host *host,
+ struct mmc_command *cmd,
+ unsigned long flags)
+ __releases(host->lock)
+ __acquires(host->lock)
+{
+ struct mmc_command *deferred_cmd = host->deferred_cmd;
+ int timeout = 10; /* Approx. 10 ms */
+ bool present;
+
+ while (!sdhci_uhs2_send_command(host, cmd)) {
+ if (!timeout--) {
+ pr_err("%s: Controller never released inhibit bit(s).\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ cmd->error = -EIO;
+ return false;
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ usleep_range(1000, 1250);
+
+ present = host->mmc->ops->get_cd(host->mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* A deferred command might disappear, handle that */
+ if (cmd == deferred_cmd && cmd != host->deferred_cmd)
+ return true;
+
+ if (sdhci_present_error(host, cmd, present))
+ return false;
+ }
+
+ if (cmd == host->deferred_cmd)
+ host->deferred_cmd = NULL;
+
+ return true;
+}
+
+static void __sdhci_uhs2_finish_command(struct sdhci_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+ u8 resp;
+ u8 error_code;
+ bool breada0 = 0;
+ int i;
+
+ if (host->mmc->uhs2_sd_tran) {
+ resp = sdhci_readb(host, SDHCI_UHS2_RESPONSE + 2);
+ if (resp & UHS2_RES_NACK_MASK) {
+ error_code = (resp >> UHS2_RES_ECODE_POS) & UHS2_RES_ECODE_MASK;
+ pr_err("%s: NACK response, ECODE=0x%x.\n",
+ mmc_hostname(host->mmc), error_code);
+ }
+ breada0 = 1;
+ }
+
+ if (cmd->uhs2_cmd->uhs2_resp_len) {
+ int len = min_t(int, cmd->uhs2_cmd->uhs2_resp_len, UHS2_MAX_RESP_LEN);
+
+ /* Get whole response of some native CCMD, like
+ * DEVICE_INIT, ENUMERATE.
+ */
+ for (i = 0; i < len; i++)
+ cmd->uhs2_cmd->uhs2_resp[i] = sdhci_readb(host, SDHCI_UHS2_RESPONSE + i);
+ } else {
+ /* Get SD CMD response and Payload for some read
+ * CCMD, like INQUIRY_CFG.
+ */
+ /* Per spec (p136), payload field is divided into
+ * a unit of DWORD and transmission order within
+ * a DWORD is big endian.
+ */
+ if (!breada0)
+ sdhci_readl(host, SDHCI_UHS2_RESPONSE);
+ for (i = 4; i < 20; i += 4) {
+ cmd->resp[i / 4 - 1] =
+ (sdhci_readb(host,
+ SDHCI_UHS2_RESPONSE + i) << 24) |
+ (sdhci_readb(host,
+ SDHCI_UHS2_RESPONSE + i + 1)
+ << 16) |
+ (sdhci_readb(host,
+ SDHCI_UHS2_RESPONSE + i + 2)
+ << 8) |
+ sdhci_readb(host, SDHCI_UHS2_RESPONSE + i + 3);
+ }
+ }
+}
+
+static void sdhci_uhs2_finish_command(struct sdhci_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+
+ __sdhci_uhs2_finish_command(host);
+
+ host->cmd = NULL;
+
+ if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
+ mmc_command_done(host->mmc, cmd->mrq);
+
+ /*
+ * The host can send an interrupt when the busy state has
+ * ended, allowing us to wait without wasting CPU cycles.
+ * The busy signal uses DAT0 so this is similar to waiting
+ * for data to complete.
+ *
+ * Note: The 1.0 specification is a bit ambiguous about this
+ * feature so there might be some problems with older
+ * controllers.
+ */
+ if (cmd->flags & MMC_RSP_BUSY) {
+ if (cmd->data) {
+ DBG("Cannot wait for busy signal when also doing a data transfer");
+ } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
+ cmd == host->data_cmd) {
+ /* Command complete before busy is ended */
+ return;
+ }
+ }
+
+ /* Processed actual command. */
+ if (host->data && host->data_early)
+ sdhci_uhs2_finish_data(host);
+
+ if (!cmd->data)
+ __sdhci_finish_mrq(host, cmd->mrq);
+}
+
+static void sdhci_uhs2_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd;
+ unsigned long flags;
+ bool present;
+
+ if (!(mmc_card_uhs2(mmc))) {
+ sdhci_request(mmc, mrq);
+ return;
+ }
+
+ mrq->stop = NULL;
+ mrq->sbc = NULL;
+ if (mrq->data)
+ mrq->data->stop = NULL;
+
+ /* Firstly check card presence */
+ present = mmc->ops->get_cd(mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (sdhci_present_error(host, mrq->cmd, present))
+ goto out_finish;
+
+ cmd = mrq->cmd;
+
+ if (!sdhci_uhs2_send_command_retry(host, cmd, flags))
+ goto out_finish;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return;
+
+out_finish:
+ sdhci_finish_mrq(host, mrq);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*****************************************************************************\
+ * *
+ * Request done *
+ * *
+\*****************************************************************************/
+
+static bool sdhci_uhs2_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ return sdhci_needs_reset(host, mrq) ||
+ (!(host->flags & SDHCI_DEVICE_DEAD) && mrq->data && mrq->data->error);
+}
+
+static bool sdhci_uhs2_request_done(struct sdhci_host *host)
+{
+ unsigned long flags;
+ struct mmc_request *mrq;
+ int i;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ for (i = 0; i < SDHCI_MAX_MRQS; i++) {
+ mrq = host->mrqs_done[i];
+ if (mrq)
+ break;
+ }
+
+ if (!mrq) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return true;
+ }
+
+ /*
+ * Always unmap the data buffers if they were mapped by
+ * sdhci_prepare_data() whenever we finish with a request.
+ * This avoids leaking DMA mappings on error.
+ */
+ if (host->flags & SDHCI_REQ_USE_DMA)
+ sdhci_request_done_dma(host, mrq);
+
+ /*
+ * The controller needs a reset of internal state machines
+ * upon error conditions.
+ */
+ if (sdhci_uhs2_needs_reset(host, mrq)) {
+ /*
+ * Do not finish until command and data lines are available for
+ * reset. Note there can only be one other mrq, so it cannot
+ * also be in mrqs_done, otherwise host->cmd and host->data_cmd
+ * would both be null.
+ */
+ if (host->cmd || host->data_cmd) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return true;
+ }
+
+ if (mrq->cmd->error || (mrq->data && mrq->data->error))
+ sdhci_uhs2_reset_cmd_data(host);
+ else
+ sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD);
+ host->pending_reset = false;
+ }
+
+ host->mrqs_done[i] = NULL;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (host->ops->request_done)
+ host->ops->request_done(host, mrq);
+ else
+ mmc_request_done(host->mmc, mrq);
+
+ return false;
+}
+
+static void sdhci_uhs2_complete_work(struct work_struct *work)
+{
+ struct sdhci_host *host = container_of(work, struct sdhci_host,
+ complete_work);
+
+ if (!mmc_card_uhs2(host->mmc)) {
+ sdhci_complete_work(work);
+ return;
+ }
+
+ while (!sdhci_uhs2_request_done(host))
+ ;
+}
+
+/*****************************************************************************\
+ * *
+ * Interrupt handling *
+ * *
+\*****************************************************************************/
+
+static void __sdhci_uhs2_irq(struct sdhci_host *host, u32 uhs2mask)
+{
+ struct mmc_command *cmd = host->cmd;
+
+ DBG("*** %s got UHS2 error interrupt: 0x%08x\n",
+ mmc_hostname(host->mmc), uhs2mask);
+
+ if (uhs2mask & SDHCI_UHS2_INT_CMD_ERR_MASK) {
+ if (!host->cmd) {
+ pr_err("%s: Got cmd interrupt 0x%08x but no cmd.\n",
+ mmc_hostname(host->mmc),
+ (unsigned int)uhs2mask);
+ sdhci_dumpregs(host);
+ return;
+ }
+ host->cmd->error = -EILSEQ;
+ if (uhs2mask & SDHCI_UHS2_INT_CMD_TIMEOUT)
+ host->cmd->error = -ETIMEDOUT;
+ }
+
+ if (uhs2mask & SDHCI_UHS2_INT_DATA_ERR_MASK) {
+ if (!host->data) {
+ pr_err("%s: Got data interrupt 0x%08x but no data.\n",
+ mmc_hostname(host->mmc),
+ (unsigned int)uhs2mask);
+ sdhci_dumpregs(host);
+ return;
+ }
+
+ if (uhs2mask & SDHCI_UHS2_INT_DEADLOCK_TIMEOUT) {
+ pr_err("%s: Got deadlock timeout interrupt 0x%08x\n",
+ mmc_hostname(host->mmc),
+ (unsigned int)uhs2mask);
+ host->data->error = -ETIMEDOUT;
+ } else if (uhs2mask & SDHCI_UHS2_INT_ADMA_ERROR) {
+ pr_err("%s: ADMA error = 0x %x\n",
+ mmc_hostname(host->mmc),
+ sdhci_readb(host, SDHCI_ADMA_ERROR));
+ host->data->error = -EIO;
+ } else {
+ host->data->error = -EILSEQ;
+ }
+ }
+
+ if (host->data && host->data->error)
+ sdhci_uhs2_finish_data(host);
+ else
+ sdhci_finish_mrq(host, cmd->mrq);
+}
+
+u32 sdhci_uhs2_irq(struct sdhci_host *host, u32 intmask)
+{
+ u32 mask = intmask, uhs2mask;
+
+ if (!mmc_card_uhs2(host->mmc))
+ goto out;
+
+ if (intmask & SDHCI_INT_ERROR) {
+ uhs2mask = sdhci_readl(host, SDHCI_UHS2_INT_STATUS);
+ if (!(uhs2mask & SDHCI_UHS2_INT_ERROR_MASK))
+ goto cmd_irq;
+
+ /* Clear error interrupts */
+ sdhci_writel(host, uhs2mask & SDHCI_UHS2_INT_ERROR_MASK,
+ SDHCI_UHS2_INT_STATUS);
+
+ /* Handle error interrupts */
+ __sdhci_uhs2_irq(host, uhs2mask);
+
+ /* Caller, sdhci_irq(), doesn't have to care about UHS-2 errors */
+ intmask &= ~SDHCI_INT_ERROR;
+ mask &= SDHCI_INT_ERROR;
+ }
+
+cmd_irq:
+ if (intmask & SDHCI_INT_CMD_MASK) {
+ /* Clear command interrupt */
+ sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, SDHCI_INT_STATUS);
+
+ /* Handle command interrupt */
+ if (intmask & SDHCI_INT_RESPONSE)
+ sdhci_uhs2_finish_command(host);
+
+ /* Caller, sdhci_irq(), doesn't have to care about UHS-2 commands */
+ intmask &= ~SDHCI_INT_CMD_MASK;
+ mask &= SDHCI_INT_CMD_MASK;
+ }
+
+ /* Clear already-handled interrupts. */
+ sdhci_writel(host, mask, SDHCI_INT_STATUS);
+
+out:
+ return intmask;
+}
+EXPORT_SYMBOL_GPL(sdhci_uhs2_irq);
+
+static irqreturn_t sdhci_uhs2_thread_irq(int irq, void *dev_id)
+{
+ struct sdhci_host *host = dev_id;
+ struct mmc_command *cmd;
+ unsigned long flags;
+ u32 isr;
+
+ if (!mmc_card_uhs2(host->mmc))
+ return sdhci_thread_irq(irq, dev_id);
+
+ while (!sdhci_uhs2_request_done(host))
+ ;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ isr = host->thread_isr;
+ host->thread_isr = 0;
+
+ cmd = host->deferred_cmd;
+ if (cmd && !sdhci_uhs2_send_command_retry(host, cmd, flags))
+ sdhci_finish_mrq(host, cmd->mrq);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+ struct mmc_host *mmc = host->mmc;
+
+ mmc->ops->card_event(mmc);
+ mmc_detect_change(mmc, msecs_to_jiffies(200));
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*****************************************************************************\
+ * *
+ * Driver init/exit *
+ * *
+\*****************************************************************************/
+
+static int sdhci_uhs2_host_ops_init(struct sdhci_host *host)
+{
+ host->mmc_host_ops.uhs2_control = sdhci_uhs2_control;
+ host->mmc_host_ops.request = sdhci_uhs2_request;
+
+ return 0;
+}
+
+static int __init sdhci_uhs2_mod_init(void)
+{
+ return 0;
+}
+module_init(sdhci_uhs2_mod_init);
+
+static void __exit sdhci_uhs2_mod_exit(void)
+{
+}
+module_exit(sdhci_uhs2_mod_exit);
+
+/*****************************************************************************\
+ * *
+ * Device allocation/registration *
+ * *
+\*****************************************************************************/
+
+static void __sdhci_uhs2_add_host_v4(struct sdhci_host *host, u32 caps1)
+{
+ struct mmc_host *mmc;
+ u32 max_current_caps2;
+
+ mmc = host->mmc;
+
+ /* Support UHS2 */
+ if (caps1 & SDHCI_SUPPORT_UHS2)
+ mmc->caps2 |= MMC_CAP2_SD_UHS2;
+
+ max_current_caps2 = sdhci_readl(host, SDHCI_MAX_CURRENT_1);
+
+ if ((caps1 & SDHCI_CAN_VDD2_180) &&
+ !max_current_caps2 &&
+ !IS_ERR(mmc->supply.vqmmc2)) {
+ /* UHS2 - VDD2 */
+ int curr = regulator_get_current_limit(mmc->supply.vqmmc2);
+
+ if (curr > 0) {
+ /* convert to SDHCI_MAX_CURRENT format */
+ curr = curr / 1000; /* convert to mA */
+ curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;
+ curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
+ max_current_caps2 = curr;
+ }
+ }
+
+ if (!(caps1 & SDHCI_CAN_VDD2_180))
+ mmc->caps2 &= ~MMC_CAP2_SD_UHS2;
+}
+
+static void __sdhci_uhs2_remove_host(struct sdhci_host *host, int dead)
+{
+ if (!mmc_card_uhs2(host->mmc))
+ return;
+
+ if (!dead)
+ sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_FULL);
+}
+
+int sdhci_uhs2_add_host(struct sdhci_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ int ret;
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ if (host->version >= SDHCI_SPEC_400)
+ __sdhci_uhs2_add_host_v4(host, host->caps1);
+
+ if ((mmc->caps2 & MMC_CAP2_SD_UHS2) && !host->v4_mode)
+ /* host doesn't want to enable UHS2 support */
+ mmc->caps2 &= ~MMC_CAP2_SD_UHS2;
+
+ /* overwrite ops */
+ if (mmc->caps2 & MMC_CAP2_SD_UHS2)
+ sdhci_uhs2_host_ops_init(host);
+
+ host->complete_work_fn = sdhci_uhs2_complete_work;
+ host->thread_irq_fn = sdhci_uhs2_thread_irq;
+
+ /* LED support not implemented for UHS2 */
+ host->quirks |= SDHCI_QUIRK_NO_LED;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ if (host->version >= SDHCI_SPEC_400)
+ __sdhci_uhs2_remove_host(host, 0);
+
+ sdhci_cleanup_host(host);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sdhci_uhs2_add_host);
+
+void sdhci_uhs2_remove_host(struct sdhci_host *host, int dead)
+{
+ __sdhci_uhs2_remove_host(host, dead);
+
+ sdhci_remove_host(host, dead);
+}
+EXPORT_SYMBOL_GPL(sdhci_uhs2_remove_host);
+
+MODULE_AUTHOR("Intel, Genesys Logic, Linaro");
+MODULE_DESCRIPTION("MMC UHS-II Support");
+MODULE_LICENSE("GPL");
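
For reference, the response parsing in __sdhci_uhs2_finish_command() above reads the UHS-II response register byte by byte and assembles each payload DWORD in big-endian order. A minimal standalone sketch of the same assembly (not part of the patch; it assumes a raw 20-byte response buffer, and get_unaligned_be32() would be the usual kernel shorthand for the same operation):

	/* Illustrative only: skip the 4-byte header and assemble four
	 * big-endian DWORDs from a 20-byte UHS-II response buffer. */
	static void uhs2_unpack_resp(const u8 raw[20], u32 resp[4])
	{
		int i;

		for (i = 4; i < 20; i += 4)
			resp[i / 4 - 1] = ((u32)raw[i] << 24) |
					  ((u32)raw[i + 1] << 16) |
					  ((u32)raw[i + 2] << 8) |
					  raw[i + 3];
	}
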
diff --git a/drivers/mmc/host/sdhci-uhs2.h b/drivers/mmc/host/sdhci-uhs2.h
new file mode 100644
index 000000000000..da6905919630
--- /dev/null
+++ b/drivers/mmc/host/sdhci-uhs2.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Header file for Host Controller UHS2 related registers.
+ *
+ * Copyright (C) 2014 Intel Corp, All Rights Reserved.
+ */
+#ifndef __SDHCI_UHS2_H
+#define __SDHCI_UHS2_H
+
+#include <linux/bits.h>
+
+/* SDHCI Category C registers : UHS2 usage */
+
+#define SDHCI_UHS2_CM_TRAN_RESP 0x10
+#define SDHCI_UHS2_SD_TRAN_RESP 0x18
+#define SDHCI_UHS2_SD_TRAN_RESP_1 0x1C
+
+/* SDHCI Category B registers : UHS2 only */
+
+#define SDHCI_UHS2_BLOCK_SIZE 0x80
+#define SDHCI_UHS2_MAKE_BLKSZ(dma, blksz) ((((dma) & 0x7) << 12) | ((blksz) & 0xFFF))
+
+#define SDHCI_UHS2_BLOCK_COUNT 0x84
+
+#define SDHCI_UHS2_CMD_PACKET 0x88
+#define SDHCI_UHS2_CMD_PACK_MAX_LEN 20
+
+#define SDHCI_UHS2_TRANS_MODE 0x9C
+#define SDHCI_UHS2_TRNS_DMA BIT(0)
+#define SDHCI_UHS2_TRNS_BLK_CNT_EN BIT(1)
+#define SDHCI_UHS2_TRNS_DATA_TRNS_WRT BIT(4)
+#define SDHCI_UHS2_TRNS_BLK_BYTE_MODE BIT(5)
+#define SDHCI_UHS2_TRNS_RES_R5 BIT(6)
+#define SDHCI_UHS2_TRNS_RES_ERR_CHECK_EN BIT(7)
+#define SDHCI_UHS2_TRNS_RES_INT_DIS BIT(8)
+#define SDHCI_UHS2_TRNS_WAIT_EBSY BIT(14)
+#define SDHCI_UHS2_TRNS_2L_HD BIT(15)
+
+#define SDHCI_UHS2_CMD 0x9E
+#define SDHCI_UHS2_CMD_SUB_CMD BIT(2)
+#define SDHCI_UHS2_CMD_DATA BIT(5)
+#define SDHCI_UHS2_CMD_TRNS_ABORT BIT(6)
+#define SDHCI_UHS2_CMD_CMD12 BIT(7)
+#define SDHCI_UHS2_CMD_DORMANT GENMASK(7, 6)
+#define SDHCI_UHS2_CMD_PACK_LEN_MASK GENMASK(12, 8)
+
+#define SDHCI_UHS2_RESPONSE 0xA0
+#define SDHCI_UHS2_RESPONSE_MAX_LEN 20
+
+#define SDHCI_UHS2_MSG_SELECT 0xB4
+#define SDHCI_UHS2_MSG_SELECT_CURR 0x0
+#define SDHCI_UHS2_MSG_SELECT_ONE 0x1
+#define SDHCI_UHS2_MSG_SELECT_TWO 0x2
+#define SDHCI_UHS2_MSG_SELECT_THREE 0x3
+
+#define SDHCI_UHS2_MSG 0xB8
+
+#define SDHCI_UHS2_DEV_INT_STATUS 0xBC
+
+#define SDHCI_UHS2_DEV_SELECT 0xBE
+#define SDHCI_UHS2_DEV_SEL_MASK GENMASK(3, 0)
+#define SDHCI_UHS2_DEV_SEL_INT_MSG_EN BIT(7)
+
+#define SDHCI_UHS2_DEV_INT_CODE 0xBF
+
+#define SDHCI_UHS2_SW_RESET 0xC0
+#define SDHCI_UHS2_SW_RESET_FULL BIT(0)
+#define SDHCI_UHS2_SW_RESET_SD BIT(1)
+
+#define SDHCI_UHS2_TIMER_CTRL 0xC2
+#define SDHCI_UHS2_TIMER_CTRL_DEADLOCK_MASK GENMASK(7, 4)
+
+#define SDHCI_UHS2_INT_STATUS 0xC4
+#define SDHCI_UHS2_INT_STATUS_ENABLE 0xC8
+#define SDHCI_UHS2_INT_SIGNAL_ENABLE 0xCC
+#define SDHCI_UHS2_INT_HEADER_ERR BIT(0)
+#define SDHCI_UHS2_INT_RES_ERR BIT(1)
+#define SDHCI_UHS2_INT_RETRY_EXP BIT(2)
+#define SDHCI_UHS2_INT_CRC BIT(3)
+#define SDHCI_UHS2_INT_FRAME_ERR BIT(4)
+#define SDHCI_UHS2_INT_TID_ERR BIT(5)
+#define SDHCI_UHS2_INT_UNRECOVER BIT(7)
+#define SDHCI_UHS2_INT_EBUSY_ERR BIT(8)
+#define SDHCI_UHS2_INT_ADMA_ERROR BIT(15)
+#define SDHCI_UHS2_INT_CMD_TIMEOUT BIT(16)
+#define SDHCI_UHS2_INT_DEADLOCK_TIMEOUT BIT(17)
+#define SDHCI_UHS2_INT_VENDOR_ERR BIT(27)
+#define SDHCI_UHS2_INT_ERROR_MASK ( \
+ SDHCI_UHS2_INT_HEADER_ERR | \
+ SDHCI_UHS2_INT_RES_ERR | \
+ SDHCI_UHS2_INT_RETRY_EXP | \
+ SDHCI_UHS2_INT_CRC | \
+ SDHCI_UHS2_INT_FRAME_ERR | \
+ SDHCI_UHS2_INT_TID_ERR | \
+ SDHCI_UHS2_INT_UNRECOVER | \
+ SDHCI_UHS2_INT_EBUSY_ERR | \
+ SDHCI_UHS2_INT_ADMA_ERROR | \
+ SDHCI_UHS2_INT_CMD_TIMEOUT | \
+ SDHCI_UHS2_INT_DEADLOCK_TIMEOUT)
+#define SDHCI_UHS2_INT_CMD_ERR_MASK ( \
+ SDHCI_UHS2_INT_HEADER_ERR | \
+ SDHCI_UHS2_INT_RES_ERR | \
+ SDHCI_UHS2_INT_FRAME_ERR | \
+ SDHCI_UHS2_INT_TID_ERR | \
+ SDHCI_UHS2_INT_CMD_TIMEOUT)
+/* CRC Error occurs during a packet receiving */
+#define SDHCI_UHS2_INT_DATA_ERR_MASK ( \
+ SDHCI_UHS2_INT_RETRY_EXP | \
+ SDHCI_UHS2_INT_CRC | \
+ SDHCI_UHS2_INT_UNRECOVER | \
+ SDHCI_UHS2_INT_EBUSY_ERR | \
+ SDHCI_UHS2_INT_ADMA_ERROR | \
+ SDHCI_UHS2_INT_DEADLOCK_TIMEOUT)
+
+#define SDHCI_UHS2_SETTINGS_PTR 0xE0
+#define SDHCI_UHS2_GEN_SETTINGS_POWER_LOW BIT(0)
+#define SDHCI_UHS2_GEN_SETTINGS_N_LANES_MASK GENMASK(11, 8)
+#define SDHCI_UHS2_FD_OR_2L_HD 0x0 /* 2 lanes */
+#define SDHCI_UHS2_2D1U_FD 0x2 /* 3 lanes, 2 down, 1 up, full duplex */
+#define SDHCI_UHS2_1D2U_FD 0x3 /* 3 lanes, 1 down, 2 up, full duplex */
+#define SDHCI_UHS2_2D2U_FD 0x4 /* 4 lanes, 2 down, 2 up, full duplex */
+
+#define SDHCI_UHS2_PHY_SET_SPEED_B BIT(6)
+#define SDHCI_UHS2_PHY_HIBERNATE_EN BIT(12)
+#define SDHCI_UHS2_PHY_N_LSS_SYN_MASK GENMASK(19, 16)
+#define SDHCI_UHS2_PHY_N_LSS_DIR_MASK GENMASK(23, 20)
+
+#define SDHCI_UHS2_TRAN_N_FCU_MASK GENMASK(15, 8)
+#define SDHCI_UHS2_TRAN_RETRY_CNT_MASK GENMASK(17, 16)
+#define SDHCI_UHS2_TRAN_1_N_DAT_GAP_MASK GENMASK(7, 0)
+
+#define SDHCI_UHS2_CAPS_PTR 0xE2
+#define SDHCI_UHS2_CAPS_OFFSET 0
+#define SDHCI_UHS2_CAPS_DAP_MASK GENMASK(3, 0)
+#define SDHCI_UHS2_CAPS_GAP_MASK GENMASK(7, 4)
+#define SDHCI_UHS2_CAPS_GAP(gap) ((gap) * 360)
+#define SDHCI_UHS2_CAPS_LANE_MASK GENMASK(13, 8)
+#define SDHCI_UHS2_CAPS_2L_HD_FD 1
+#define SDHCI_UHS2_CAPS_2D1U_FD 2
+#define SDHCI_UHS2_CAPS_1D2U_FD 4
+#define SDHCI_UHS2_CAPS_2D2U_FD 8
+#define SDHCI_UHS2_CAPS_ADDR_64 BIT(14)
+#define SDHCI_UHS2_CAPS_BOOT BIT(15)
+#define SDHCI_UHS2_CAPS_DEV_TYPE_MASK GENMASK(17, 16)
+#define SDHCI_UHS2_CAPS_DEV_TYPE_RMV 0
+#define SDHCI_UHS2_CAPS_DEV_TYPE_EMB 1
+#define SDHCI_UHS2_CAPS_DEV_TYPE_EMB_RMV 2
+#define SDHCI_UHS2_CAPS_NUM_DEV_MASK GENMASK(21, 18)
+#define SDHCI_UHS2_CAPS_BUS_TOPO_MASK GENMASK(23, 22)
+#define SDHCI_UHS2_CAPS_BUS_TOPO_SHIFT 22
+#define SDHCI_UHS2_CAPS_BUS_TOPO_P2P 0
+#define SDHCI_UHS2_CAPS_BUS_TOPO_RING 1
+#define SDHCI_UHS2_CAPS_BUS_TOPO_HUB 2
+#define SDHCI_UHS2_CAPS_BUS_TOPO_HUB_RING 3
+
+#define SDHCI_UHS2_CAPS_PHY_OFFSET 4
+#define SDHCI_UHS2_CAPS_PHY_REV_MASK GENMASK(5, 0)
+#define SDHCI_UHS2_CAPS_PHY_RANGE_MASK GENMASK(7, 6)
+#define SDHCI_UHS2_CAPS_PHY_RANGE_A 0
+#define SDHCI_UHS2_CAPS_PHY_RANGE_B 1
+#define SDHCI_UHS2_CAPS_PHY_N_LSS_SYN_MASK GENMASK(19, 16)
+#define SDHCI_UHS2_CAPS_PHY_N_LSS_DIR_MASK GENMASK(23, 20)
+#define SDHCI_UHS2_CAPS_TRAN_OFFSET 8
+#define SDHCI_UHS2_CAPS_TRAN_LINK_REV_MASK GENMASK(5, 0)
+#define SDHCI_UHS2_CAPS_TRAN_N_FCU_MASK GENMASK(15, 8)
+#define SDHCI_UHS2_CAPS_TRAN_HOST_TYPE_MASK GENMASK(18, 16)
+#define SDHCI_UHS2_CAPS_TRAN_BLK_LEN_MASK GENMASK(31, 20)
+
+#define SDHCI_UHS2_CAPS_TRAN_1_OFFSET 12
+#define SDHCI_UHS2_CAPS_TRAN_1_N_DATA_GAP_MASK GENMASK(7, 0)
+
+#define SDHCI_UHS2_EMBED_CTRL_PTR 0xE6
+#define SDHCI_UHS2_VENDOR_PTR 0xE8
+
+struct sdhci_host;
+struct mmc_command;
+struct mmc_request;
+
+void sdhci_uhs2_dump_regs(struct sdhci_host *host);
+void sdhci_uhs2_reset(struct sdhci_host *host, u16 mask);
+void sdhci_uhs2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd);
+void sdhci_uhs2_set_timeout(struct sdhci_host *host, struct mmc_command *cmd);
+int sdhci_uhs2_add_host(struct sdhci_host *host);
+void sdhci_uhs2_remove_host(struct sdhci_host *host, int dead);
+void sdhci_uhs2_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set);
+u32 sdhci_uhs2_irq(struct sdhci_host *host, u32 intmask);
+
+#endif /* __SDHCI_UHS2_H */
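
The capability and setting fields above are plain GENMASK() definitions, so individual fields can be pulled out with FIELD_GET() from <linux/bitfield.h>. A minimal sketch (not part of the patch; the caps value is assumed to have already been read from the register block located via SDHCI_UHS2_CAPS_PTR):

	#include <linux/bitfield.h>
	#include <linux/printk.h>

	/* Illustrative only: decode a few fields of the first UHS-II
	 * capability dword. */
	static void uhs2_show_caps(u32 caps)
	{
		unsigned int gap   = FIELD_GET(SDHCI_UHS2_CAPS_GAP_MASK, caps);
		unsigned int lanes = FIELD_GET(SDHCI_UHS2_CAPS_LANE_MASK, caps);
		unsigned int type  = FIELD_GET(SDHCI_UHS2_CAPS_DEV_TYPE_MASK, caps);

		pr_debug("UHS-II caps: gap=%u lanes=0x%x dev_type=%u\n",
			 SDHCI_UHS2_CAPS_GAP(gap), lanes, type);
	}
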
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
index 8cf3a375de65..cc9d28b75eb9 100644
--- a/drivers/mmc/host/sdhci-xenon-phy.c
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ktime.h>
+#include <linux/iopoll.h>
#include <linux/of_address.h>
#include "sdhci-pltfm.h"
@@ -109,6 +110,8 @@
#define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST (XENON_EMMC_PHY_REG_BASE + 0x18)
#define XENON_LOGIC_TIMING_VALUE 0x00AA8977
+#define XENON_MAX_PHY_TIMEOUT_LOOPS 100
+
/*
* List offset of PHY registers and some special register values
* in eMMC PHY 5.0 or eMMC PHY 5.1
@@ -216,6 +219,19 @@ static int xenon_alloc_emmc_phy(struct sdhci_host *host)
return 0;
}
+static int xenon_check_stability_internal_clk(struct sdhci_host *host)
+{
+ u32 reg;
+ int err;
+
+ err = read_poll_timeout(sdhci_readw, reg, reg & SDHCI_CLOCK_INT_STABLE,
+ 1100, 20000, false, host, SDHCI_CLOCK_CONTROL);
+ if (err)
+ dev_err(mmc_dev(host->mmc), "phy_init: Internal clock never stabilized.\n");
+
+ return err;
+}
+
/*
* eMMC 5.0/5.1 PHY init/re-init.
* eMMC PHY init should be executed after:
@@ -232,6 +248,11 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
+ int ret = xenon_check_stability_internal_clk(host);
+
+ if (ret)
+ return ret;
+
reg = sdhci_readl(host, phy_regs->timing_adj);
reg |= XENON_PHY_INITIALIZAION;
sdhci_writel(host, reg, phy_regs->timing_adj);
@@ -259,18 +280,27 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
/* get the wait time */
wait /= clock;
wait++;
- /* wait for host eMMC PHY init completes */
- udelay(wait);
- reg = sdhci_readl(host, phy_regs->timing_adj);
- reg &= XENON_PHY_INITIALIZAION;
- if (reg) {
+ /*
+ * AC5X spec says bit must be polled until zero.
+ * We see cases in which timeout can take longer
+ * than the standard calculation on AC5X, which is
+ * expected following the spec comment above.
+ * According to the spec, we must wait as long as
+ * it takes for that bit to toggle on AC5X.
+ * Cap that with 100 delay loops so we won't get
+ * stuck here forever:
+ */
+
+ ret = read_poll_timeout(sdhci_readl, reg,
+ !(reg & XENON_PHY_INITIALIZAION),
+ wait, XENON_MAX_PHY_TIMEOUT_LOOPS * wait,
+ false, host, phy_regs->timing_adj);
+ if (ret)
dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n",
- wait);
- return -ETIMEDOUT;
- }
+ wait * XENON_MAX_PHY_TIMEOUT_LOOPS);
- return 0;
+ return ret;
}
#define ARMADA_3700_SOC_PAD_1_8V 0x1
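
Both new call sites above use read_poll_timeout() from <linux/iopoll.h> in place of open-coded delay-and-check loops. Its general shape, sketched here with a hypothetical EXAMPLE_READY_BIT and EXAMPLE_STATUS_REG purely for illustration:

	#include <linux/iopoll.h>

	/* Illustrative only: poll a 16-bit register until a bit is set,
	 * sleeping roughly 1ms between reads and giving up after 20ms.
	 * Returns 0 on success or -ETIMEDOUT on timeout. */
	static int example_wait_ready(struct sdhci_host *host)
	{
		u16 reg;

		return read_poll_timeout(sdhci_readw, reg,
					 reg & EXAMPLE_READY_BIT,
					 1000, 20000, false,
					 host, EXAMPLE_STATUS_REG);
	}
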
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 666cee4c7f7c..046e8100dd08 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -18,6 +18,8 @@
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
#include "sdhci-pltfm.h"
#include "sdhci-xenon.h"
@@ -241,16 +243,6 @@ static void xenon_voltage_switch(struct sdhci_host *host)
{
/* Wait for 5ms after set 1.8V signal enable bit */
usleep_range(5000, 5500);
-
- /*
- * For some reason the controller's Host Control2 register reports
- * the bit representing 1.8V signaling as 0 when read after it was
- * written as 1. Subsequent read reports 1.
- *
- * Since this may cause some issues, do an empty read of the Host
- * Control2 register here to circumvent this.
- */
- sdhci_readw(host, SDHCI_HOST_CONTROL2);
}
static unsigned int xenon_get_max_clock(struct sdhci_host *host)
@@ -432,6 +424,7 @@ static int xenon_probe_params(struct platform_device *pdev)
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
u32 sdhc_id, nr_sdhc;
u32 tuning_count;
+ struct sysinfo si;
/* Disable HS200 on Armada AP806 */
if (priv->hw_version == XENON_AP806)
@@ -460,6 +453,23 @@ static int xenon_probe_params(struct platform_device *pdev)
}
priv->tuning_count = tuning_count;
+ /*
+ * AC5/X/IM hardware passes only 31 bits through the crossbar switch.
+ * If we have more than 2GB of memory, we might pass memory pointers
+ * which are above 2GB and cannot be represented correctly. In this
+ * case, disable ADMA and 64-bit DMA and allow only SDMA. This
+ * effectively enables the bounce buffer quirk in the generic SDHCI
+ * driver, which makes sure DMA is only done from supported memory
+ * regions:
+ */
+ if (priv->hw_version == XENON_AC5) {
+ si_meminfo(&si);
+ if (si.totalram * si.mem_unit > SZ_2G) {
+ host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA;
+ }
+ }
+
return xenon_phy_parse_params(dev, host);
}
@@ -522,14 +532,13 @@ static int xenon_probe(struct platform_device *pdev)
if (dev->of_node) {
pltfm_host->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(pltfm_host->clk)) {
- err = PTR_ERR(pltfm_host->clk);
- dev_err(&pdev->dev, "Failed to setup input clk: %d\n", err);
- goto free_pltfm;
- }
+ if (IS_ERR(pltfm_host->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pltfm_host->clk),
+ "Failed to setup input clk.\n");
+
err = clk_prepare_enable(pltfm_host->clk);
if (err)
- goto free_pltfm;
+ return err;
priv->axi_clk = devm_clk_get(&pdev->dev, "axi");
if (IS_ERR(priv->axi_clk)) {
@@ -572,6 +581,16 @@ static int xenon_probe(struct platform_device *pdev)
goto remove_sdhc;
pm_runtime_put_autosuspend(&pdev->dev);
+ /*
+ * If we previously detected AC5 with over 2GB of memory, we
+ * disabled ADMA and 64-bit DMA, which means the generic SDHCI
+ * driver has set the DMA mask to 32 bits. Since DDR starts at
+ * 0x2_0000_0000, we must use a 34-bit DMA mask to access this
+ * DDR memory:
+ */
+ if (priv->hw_version == XENON_AC5 &&
+ host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(34));
return 0;
@@ -583,12 +602,10 @@ err_clk_axi:
clk_disable_unprepare(priv->axi_clk);
err_clk:
clk_disable_unprepare(pltfm_host->clk);
-free_pltfm:
- sdhci_pltfm_free(pdev);
return err;
}
-static int xenon_remove(struct platform_device *pdev)
+static void xenon_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -603,13 +620,8 @@ static int xenon_remove(struct platform_device *pdev)
xenon_sdhc_unprepare(host);
clk_disable_unprepare(priv->axi_clk);
clk_disable_unprepare(pltfm_host->clk);
-
- sdhci_pltfm_free(pdev);
-
- return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int xenon_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -622,19 +634,14 @@ static int xenon_suspend(struct device *dev)
priv->restore_needed = true;
return ret;
}
-#endif
-#ifdef CONFIG_PM
static int xenon_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
- int ret;
- ret = sdhci_runtime_suspend_host(host);
- if (ret)
- return ret;
+ sdhci_runtime_suspend_host(host);
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
@@ -669,22 +676,16 @@ static int xenon_runtime_resume(struct device *dev)
priv->restore_needed = false;
}
- ret = sdhci_runtime_resume_host(host, 0);
- if (ret)
- goto out;
+ sdhci_runtime_resume_host(host, 0);
return 0;
out:
clk_disable_unprepare(pltfm_host->clk);
return ret;
}
-#endif /* CONFIG_PM */
static const struct dev_pm_ops sdhci_xenon_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(xenon_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(xenon_runtime_suspend,
- xenon_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(xenon_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(xenon_runtime_suspend, xenon_runtime_resume, NULL)
};
static const struct of_device_id sdhci_xenon_dt_ids[] = {
@@ -692,6 +693,7 @@ static const struct of_device_id sdhci_xenon_dt_ids[] = {
{ .compatible = "marvell,armada-ap807-sdhci", .data = (void *)XENON_AP807},
{ .compatible = "marvell,armada-cp110-sdhci", .data = (void *)XENON_CP110},
{ .compatible = "marvell,armada-3700-sdhci", .data = (void *)XENON_A3700},
+ { .compatible = "marvell,ac5-sdhci", .data = (void *)XENON_AC5},
{}
};
MODULE_DEVICE_TABLE(of, sdhci_xenon_dt_ids);
@@ -712,10 +714,10 @@ static struct platform_driver sdhci_xenon_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_xenon_dt_ids,
.acpi_match_table = ACPI_PTR(sdhci_xenon_acpi_ids),
- .pm = &sdhci_xenon_dev_pm_ops,
+ .pm = pm_ptr(&sdhci_xenon_dev_pm_ops),
},
.probe = xenon_probe,
- .remove = xenon_remove,
+ .remove = xenon_remove,
};
module_platform_driver(sdhci_xenon_driver);
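
The AC5 handling above keys the DMA restrictions off total system memory: si_meminfo() reports the page count and page size, and their product is compared against SZ_2G. A standalone sketch of the same check (not part of the patch; the helper name is made up):

	#include <linux/mm.h>
	#include <linux/sizes.h>

	/* Illustrative only: true if the system has more than 2GiB of RAM,
	 * mirroring the check used above to decide whether to restrict DMA. */
	static bool example_over_2g(void)
	{
		struct sysinfo si;

		si_meminfo(&si);
		return (u64)si.totalram * si.mem_unit > SZ_2G;
	}
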
diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h
index 3e9c6c908a79..0460d97aad26 100644
--- a/drivers/mmc/host/sdhci-xenon.h
+++ b/drivers/mmc/host/sdhci-xenon.h
@@ -57,7 +57,8 @@ enum xenon_variant {
XENON_A3700,
XENON_AP806,
XENON_AP807,
- XENON_CP110
+ XENON_CP110,
+ XENON_AC5
};
struct xenon_priv {
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index aba6e10b8605..ac7e11f37af7 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -23,7 +23,7 @@
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
-
+#include <linux/bug.h>
#include <linux/leds.h>
#include <linux/mmc/mmc.h>
@@ -47,8 +47,6 @@
static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;
-static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
-
static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);
void sdhci_dumpregs(struct sdhci_host *host)
@@ -110,6 +108,9 @@ void sdhci_dumpregs(struct sdhci_host *host)
}
}
+ if (host->ops->dump_uhs2_regs)
+ host->ops->dump_uhs2_regs(host);
+
if (host->ops->dump_vendor_regs)
host->ops->dump_vendor_regs(host);
@@ -146,17 +147,18 @@ void sdhci_enable_v4_mode(struct sdhci_host *host)
}
EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
-static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
+bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
return cmd->data || cmd->flags & MMC_RSP_BUSY;
}
+EXPORT_SYMBOL_GPL(sdhci_data_line_cmd);
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
u32 present;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
- !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
+ !mmc_card_is_removable(host->mmc) || mmc_host_can_gpio_cd(host->mmc))
return;
if (enable) {
@@ -224,6 +226,7 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
if (timedout) {
pr_err("%s: Reset 0x%x never completed.\n",
mmc_hostname(host->mmc), (int)mask);
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
return;
}
@@ -232,28 +235,68 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
}
EXPORT_SYMBOL_GPL(sdhci_reset);
-static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
+bool sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
struct mmc_host *mmc = host->mmc;
if (!mmc->ops->get_cd(mmc))
- return;
+ return false;
}
host->ops->reset(host, mask);
- if (mask & SDHCI_RESET_ALL) {
+ return true;
+}
+EXPORT_SYMBOL_GPL(sdhci_do_reset);
+
+static void sdhci_reset_for_all(struct sdhci_host *host)
+{
+ if (sdhci_do_reset(host, SDHCI_RESET_ALL)) {
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma)
host->ops->enable_dma(host);
}
-
/* Resetting the controller clears many */
host->preset_enabled = false;
}
}
+enum sdhci_reset_reason {
+ SDHCI_RESET_FOR_INIT,
+ SDHCI_RESET_FOR_REQUEST_ERROR,
+ SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY,
+ SDHCI_RESET_FOR_TUNING_ABORT,
+ SDHCI_RESET_FOR_CARD_REMOVED,
+ SDHCI_RESET_FOR_CQE_RECOVERY,
+};
+
+static void sdhci_reset_for_reason(struct sdhci_host *host, enum sdhci_reset_reason reason)
+{
+ if (host->quirks2 & SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER) {
+ sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+ return;
+ }
+
+ switch (reason) {
+ case SDHCI_RESET_FOR_INIT:
+ sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+ break;
+ case SDHCI_RESET_FOR_REQUEST_ERROR:
+ case SDHCI_RESET_FOR_TUNING_ABORT:
+ case SDHCI_RESET_FOR_CARD_REMOVED:
+ case SDHCI_RESET_FOR_CQE_RECOVERY:
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
+ break;
+ case SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY:
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
+ break;
+ }
+}
+
+#define sdhci_reset_for(h, r) sdhci_reset_for_reason((h), SDHCI_RESET_FOR_##r)
+
static void sdhci_set_default_irqs(struct sdhci_host *host)
{
host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
@@ -322,9 +365,9 @@ static void sdhci_init(struct sdhci_host *host, int soft)
unsigned long flags;
if (soft)
- sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+ sdhci_reset_for(host, INIT);
else
- sdhci_do_reset(host, SDHCI_RESET_ALL);
+ sdhci_reset_for_all(host);
if (host->v4_mode)
sdhci_do_enable_v4_mode(host);
@@ -338,6 +381,7 @@ static void sdhci_init(struct sdhci_host *host, int soft)
if (soft) {
/* force clock reconfiguration */
host->clock = 0;
+ host->reinit_uhs = true;
mmc->ops->set_ios(mmc, &mmc->ios);
}
}
@@ -460,21 +504,22 @@ static inline void sdhci_led_deactivate(struct sdhci_host *host)
#endif
-static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
- unsigned long timeout)
+void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
+ unsigned long timeout)
{
if (sdhci_data_line_cmd(mrq->cmd))
mod_timer(&host->data_timer, timeout);
else
mod_timer(&host->timer, timeout);
}
+EXPORT_SYMBOL_GPL(sdhci_mod_timer);
static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
if (sdhci_data_line_cmd(mrq->cmd))
- del_timer(&host->data_timer);
+ timer_delete(&host->data_timer);
else
- del_timer(&host->timer);
+ timer_delete(&host->timer);
}
static inline bool sdhci_has_requests(struct sdhci_host *host)
@@ -490,7 +535,6 @@ static inline bool sdhci_has_requests(struct sdhci_host *host)
static void sdhci_read_block_pio(struct sdhci_host *host)
{
- unsigned long flags;
size_t blksize, len, chunk;
u32 scratch;
u8 *buf;
@@ -500,8 +544,6 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
blksize = host->data->blksz;
chunk = 0;
- local_irq_save(flags);
-
while (blksize) {
BUG_ON(!sg_miter_next(&host->sg_miter));
@@ -528,13 +570,10 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
}
sg_miter_stop(&host->sg_miter);
-
- local_irq_restore(flags);
}
static void sdhci_write_block_pio(struct sdhci_host *host)
{
- unsigned long flags;
size_t blksize, len, chunk;
u32 scratch;
u8 *buf;
@@ -545,8 +584,6 @@ static void sdhci_write_block_pio(struct sdhci_host *host)
chunk = 0;
scratch = 0;
- local_irq_save(flags);
-
while (blksize) {
BUG_ON(!sg_miter_next(&host->sg_miter));
@@ -573,8 +610,6 @@ static void sdhci_write_block_pio(struct sdhci_host *host)
}
sg_miter_stop(&host->sg_miter);
-
- local_irq_restore(flags);
}
static void sdhci_transfer_pio(struct sdhci_host *host)
@@ -670,16 +705,14 @@ static int sdhci_pre_dma_transfer(struct sdhci_host *host,
return sg_count;
}
-static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
+static char *sdhci_kmap_atomic(struct scatterlist *sg)
{
- local_irq_save(*flags);
- return kmap_atomic(sg_page(sg)) + sg->offset;
+ return kmap_local_page(sg_page(sg)) + sg->offset;
}
-static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
+static void sdhci_kunmap_atomic(void *buffer)
{
- kunmap_atomic(buffer);
- local_irq_restore(*flags);
+ kunmap_local(buffer);
}
void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
@@ -721,7 +754,6 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
struct mmc_data *data, int sg_count)
{
struct scatterlist *sg;
- unsigned long flags;
dma_addr_t addr, align_addr;
void *desc, *align;
char *buffer;
@@ -753,9 +785,9 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
SDHCI_ADMA2_MASK;
if (offset) {
if (data->flags & MMC_DATA_WRITE) {
- buffer = sdhci_kmap_atomic(sg, &flags);
+ buffer = sdhci_kmap_atomic(sg);
memcpy(align, buffer, offset);
- sdhci_kunmap_atomic(buffer, &flags);
+ sdhci_kunmap_atomic(buffer);
}
/* tran, valid */
@@ -771,7 +803,19 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
len -= offset;
}
- BUG_ON(len > 65536);
+ /*
+ * The block layer forces a minimum segment size of PAGE_SIZE,
+ * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
+ * multiple descriptors, noting that the ADMA table is sized
+ * for 4KiB chunks anyway, so it will be big enough.
+ */
+ while (len > host->max_adma) {
+ int n = 32 * 1024; /* 32 KiB */
+
+ __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
+ addr += n;
+ len -= n;
+ }
/* tran, valid */
if (len)
@@ -804,7 +848,6 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
int i, size;
void *align;
char *buffer;
- unsigned long flags;
if (data->flags & MMC_DATA_READ) {
bool has_unaligned = false;
@@ -827,9 +870,9 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
size = SDHCI_ADMA2_ALIGN -
(sg_dma_address(sg) & SDHCI_ADMA2_MASK);
- buffer = sdhci_kmap_atomic(sg, &flags);
+ buffer = sdhci_kmap_atomic(sg);
memcpy(buffer, align, size);
- sdhci_kunmap_atomic(buffer, &flags);
+ sdhci_kunmap_atomic(buffer);
align += SDHCI_ADMA2_ALIGN;
}
@@ -930,25 +973,25 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
struct mmc_data *data;
unsigned target_timeout, current_timeout;
- *too_big = true;
+ *too_big = false;
/*
* If the host controller provides us with an incorrect timeout
- * value, just skip the check and use 0xE. The hardware may take
+ * value, just skip the check and use the maximum. The hardware may take
* longer to time out, but that's much better than having a too-short
* timeout value.
*/
if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
- return 0xE;
+ return host->max_timeout_count;
- /* Unspecified command, asume max */
+ /* Unspecified command, assume max */
if (cmd == NULL)
- return 0xE;
+ return host->max_timeout_count;
data = cmd->data;
/* Unspecified timeout, assume max */
if (!data && !cmd->busy_timeout)
- return 0xE;
+ return host->max_timeout_count;
/* timeout in us */
target_timeout = sdhci_target_timeout(host, cmd, data);
@@ -968,17 +1011,14 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
while (current_timeout < target_timeout) {
count++;
current_timeout <<= 1;
- if (count >= 0xF)
+ if (count > host->max_timeout_count) {
+ if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
+ DBG("Too large timeout 0x%x requested for CMD%d!\n",
+ count, cmd->opcode);
+ count = host->max_timeout_count;
+ *too_big = true;
break;
- }
-
- if (count >= 0xF) {
- if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
- DBG("Too large timeout 0x%x requested for CMD%d!\n",
- count, cmd->opcode);
- count = 0xE;
- } else {
- *too_big = false;
+ }
}
return count;
@@ -1039,8 +1079,7 @@ static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
__sdhci_set_timeout(host, cmd);
}
-static void sdhci_initialize_data(struct sdhci_host *host,
- struct mmc_data *data)
+void sdhci_initialize_data(struct sdhci_host *host, struct mmc_data *data)
{
WARN_ON(host->data);
@@ -1053,6 +1092,7 @@ static void sdhci_initialize_data(struct sdhci_host *host,
host->data_early = 0;
host->data->bytes_xfered = 0;
}
+EXPORT_SYMBOL_GPL(sdhci_initialize_data);
static inline void sdhci_set_block_info(struct sdhci_host *host,
struct mmc_data *data)
@@ -1075,12 +1115,8 @@ static inline void sdhci_set_block_info(struct sdhci_host *host,
}
}
-static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data)
{
- struct mmc_data *data = cmd->data;
-
- sdhci_initialize_data(host, data);
-
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
struct scatterlist *sg;
unsigned int length_mask, offset_mask;
@@ -1131,6 +1167,8 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
}
}
+ sdhci_config_dma(host);
+
if (host->flags & SDHCI_REQ_USE_DMA) {
int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
@@ -1150,8 +1188,6 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
}
}
- sdhci_config_dma(host);
-
if (!(host->flags & SDHCI_REQ_USE_DMA)) {
int flags;
@@ -1165,6 +1201,16 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
}
sdhci_set_transfer_irqs(host);
+}
+EXPORT_SYMBOL_GPL(sdhci_prepare_dma);
+
+static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ struct mmc_data *data = cmd->data;
+
+ sdhci_initialize_data(host, data);
+
+ sdhci_prepare_dma(host, data);
sdhci_set_block_info(host, data);
}
@@ -1222,6 +1268,7 @@ static int sdhci_external_dma_setup(struct sdhci_host *host,
if (!host->mapbase)
return -EINVAL;
+ memset(&cfg, 0, sizeof(cfg));
cfg.src_addr = host->mapbase + SDHCI_BUFFER;
cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -1420,7 +1467,7 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
if (host->quirks2 &
SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
/* must not clear SDHCI_TRANSFER_MODE when tuning */
- if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
+ if (!mmc_op_tuning(cmd->opcode))
sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
} else {
/* clear Auto CMD settings for no data CMDs */
@@ -1451,7 +1498,7 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
-static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
+bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
return (!(host->flags & SDHCI_DEVICE_DEAD) &&
((mrq->cmd && mrq->cmd->error) ||
@@ -1459,6 +1506,7 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
(mrq->data && mrq->data->stop && mrq->data->stop->error) ||
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}
+EXPORT_SYMBOL_GPL(sdhci_needs_reset);
static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
{
@@ -1481,7 +1529,7 @@ static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
WARN_ON(i >= SDHCI_MAX_MRQS);
}
-static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
if (host->cmd && host->cmd->mrq == mrq)
host->cmd = NULL;
@@ -1505,15 +1553,17 @@ static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
if (!sdhci_has_requests(host))
sdhci_led_deactivate(host);
}
+EXPORT_SYMBOL_GPL(__sdhci_finish_mrq);
-static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
__sdhci_finish_mrq(host, mrq);
queue_work(host->complete_wq, &host->complete_work);
}
+EXPORT_SYMBOL_GPL(sdhci_finish_mrq);
-static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
+void __sdhci_finish_data_common(struct sdhci_host *host, bool defer_reset)
{
struct mmc_command *data_cmd = host->data_cmd;
struct mmc_data *data = host->data;
@@ -1526,9 +1576,12 @@ static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
* conditions.
*/
if (data->error) {
- if (!host->cmd || host->cmd == data_cmd)
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
+ if (defer_reset)
+ host->pending_reset = true;
+ else if (!host->cmd || host->cmd == data_cmd)
+ sdhci_reset_for(host, REQUEST_ERROR);
+ else
+ sdhci_reset_for(host, REQUEST_ERROR_DATA_ONLY);
}
if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
@@ -1546,6 +1599,14 @@ static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
data->bytes_xfered = 0;
else
data->bytes_xfered = data->blksz * data->blocks;
+}
+EXPORT_SYMBOL_GPL(__sdhci_finish_data_common);
+
+static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
+{
+ struct mmc_data *data = host->data;
+
+ __sdhci_finish_data_common(host, false);
/*
* Need to send CMD12 if -
@@ -1660,8 +1721,7 @@ static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
flags |= SDHCI_CMD_INDEX;
/* CMD19 is special in that the Data Present Select should be set */
- if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
- cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
+ if (cmd->data || mmc_op_tuning(cmd->opcode))
flags |= SDHCI_CMD_DATA;
timeout = jiffies;
@@ -1681,8 +1741,8 @@ static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
return true;
}
-static bool sdhci_present_error(struct sdhci_host *host,
- struct mmc_command *cmd, bool present)
+bool sdhci_present_error(struct sdhci_host *host,
+ struct mmc_command *cmd, bool present)
{
if (!present || host->flags & SDHCI_DEVICE_DEAD) {
cmd->error = -ENOMEDIUM;
@@ -1691,6 +1751,7 @@ static bool sdhci_present_error(struct sdhci_host *host,
return false;
}
+EXPORT_SYMBOL_GPL(sdhci_present_error);
static bool sdhci_send_command_retry(struct sdhci_host *host,
struct mmc_command *cmd,
@@ -1706,6 +1767,7 @@ static bool sdhci_send_command_retry(struct sdhci_host *host,
if (!timeout--) {
pr_err("%s: Controller never released inhibit bit(s).\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
cmd->error = -EIO;
return false;
@@ -1836,6 +1898,12 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host)
case MMC_TIMING_MMC_HS400:
preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
break;
+ case MMC_TIMING_UHS2_SPEED_A:
+ case MMC_TIMING_UHS2_SPEED_A_HD:
+ case MMC_TIMING_UHS2_SPEED_B:
+ case MMC_TIMING_UHS2_SPEED_B_HD:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_UHS2);
+ break;
default:
pr_warn("%s: Invalid UHS-I mode selected\n",
mmc_hostname(host->mmc));
@@ -1955,6 +2023,7 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
if (timedout) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
return;
}
@@ -1977,6 +2046,7 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
if (timedout) {
pr_err("%s: PLL clock never stabilised.\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
return;
}
@@ -2018,35 +2088,46 @@ static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}
+unsigned short sdhci_get_vdd_value(unsigned short vdd)
+{
+ switch (1 << vdd) {
+ case MMC_VDD_165_195:
+ /*
+ * Without a regulator, SDHCI does not support 2.0v
+ * so we only get here if the driver deliberately
+ * added the 2.0v range to ocr_avail. Map it to 1.8v
+ * for the purpose of turning on the power.
+ */
+ case MMC_VDD_20_21:
+ return SDHCI_POWER_180;
+ case MMC_VDD_29_30:
+ case MMC_VDD_30_31:
+ return SDHCI_POWER_300;
+ case MMC_VDD_32_33:
+ case MMC_VDD_33_34:
+ /*
+ * 3.4V ~ 3.6V are valid only for those platforms where it's
+ * known that the voltage range is supported by hardware.
+ */
+ case MMC_VDD_34_35:
+ case MMC_VDD_35_36:
+ return SDHCI_POWER_330;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(sdhci_get_vdd_value);
+
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
unsigned short vdd)
{
u8 pwr = 0;
if (mode != MMC_POWER_OFF) {
- switch (1 << vdd) {
- case MMC_VDD_165_195:
- /*
- * Without a regulator, SDHCI does not support 2.0v
- * so we only get here if the driver deliberately
- * added the 2.0v range to ocr_avail. Map it to 1.8v
- * for the purpose of turning on the power.
- */
- case MMC_VDD_20_21:
- pwr = SDHCI_POWER_180;
- break;
- case MMC_VDD_29_30:
- case MMC_VDD_30_31:
- pwr = SDHCI_POWER_300;
- break;
- case MMC_VDD_32_33:
- case MMC_VDD_33_34:
- pwr = SDHCI_POWER_330;
- break;
- default:
+ pwr = sdhci_get_vdd_value(vdd);
+ if (!pwr) {
WARN(1, "%s: Invalid vdd %#x\n",
mmc_hostname(host->mmc), vdd);
- break;
}
}
@@ -2238,20 +2319,40 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
-void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+static bool sdhci_timing_has_preset(unsigned char timing)
{
- struct sdhci_host *host = mmc_priv(mmc);
- u8 ctrl;
+ switch (timing) {
+ case MMC_TIMING_UHS_SDR12:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ return true;
+ }
+ return false;
+}
- if (ios->power_mode == MMC_POWER_UNDEFINED)
- return;
+static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing)
+{
+ return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
+ sdhci_timing_has_preset(timing);
+}
- if (host->flags & SDHCI_DEVICE_DEAD) {
- if (!IS_ERR(mmc->supply.vmmc) &&
- ios->power_mode == MMC_POWER_OFF)
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
- return;
- }
+static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios)
+{
+ /*
+ * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK
+ * Frequency. Check if preset values need to be enabled, or the Driver
+ * Strength needs updating. Note, clock changes are handled separately.
+ */
+ return !host->preset_enabled &&
+ (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type);
+}
+
+void sdhci_set_ios_common(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
/*
* Reset the chip on each power off.
@@ -2266,6 +2367,31 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
(ios->power_mode == MMC_POWER_UP) &&
!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
sdhci_enable_preset_value(host, false);
+}
+EXPORT_SYMBOL_GPL(sdhci_set_ios_common);
+
+void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ bool reinit_uhs = host->reinit_uhs;
+ bool turning_on_clk;
+ u8 ctrl;
+
+ host->reinit_uhs = false;
+
+ if (ios->power_mode == MMC_POWER_UNDEFINED)
+ return;
+
+ if (host->flags & SDHCI_DEVICE_DEAD) {
+ if (!IS_ERR(mmc->supply.vmmc) &&
+ ios->power_mode == MMC_POWER_OFF)
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ return;
+ }
+
+ turning_on_clk = ios->clock != host->clock && ios->clock && !host->clock;
+
+ sdhci_set_ios_common(mmc, ios);
if (!ios->clock || ios->clock != host->clock) {
host->ops->set_clock(host, ios->clock);
@@ -2294,6 +2420,17 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->ops->set_bus_width(host, ios->bus_width);
+ /*
+ * Special case to avoid multiple clock changes during voltage
+ * switching.
+ */
+ if (!reinit_uhs &&
+ turning_on_clk &&
+ host->timing == ios->timing &&
+ host->version >= SDHCI_SPEC_300 &&
+ !sdhci_presetable_values_change(host, ios))
+ return;
+
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
@@ -2314,8 +2451,21 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->version >= SDHCI_SPEC_300) {
u16 clk, ctrl_2;
+ /*
+ * According to SDHCI Spec v3.00, if the Preset Value
+ * Enable in the Host Control 2 register is set, we
+ * need to reset SD Clock Enable before changing High
+ * Speed Enable to avoid generating clock glitches.
+ */
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (clk & SDHCI_CLOCK_CARD_EN) {
+ clk &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+ }
+
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
if (!host->preset_enabled) {
- sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
/*
* We only need to set Driver Strength if the
* preset value enable is not set.
@@ -2337,60 +2487,26 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
- } else {
- /*
- * According to SDHC Spec v3.00, if the Preset Value
- * Enable in the Host Control 2 register is set, we
- * need to reset SD Clock Enable before changing High
- * Speed Enable to avoid generating clock gliches.
- */
-
- /* Reset SD Clock Enable */
- clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
- clk &= ~SDHCI_CLOCK_CARD_EN;
- sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
-
- sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
-
- /* Re-enable SD Clock */
- host->ops->set_clock(host, host->clock);
+ host->drv_type = ios->drv_type;
}
- /* Reset SD Clock Enable */
- clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
- clk &= ~SDHCI_CLOCK_CARD_EN;
- sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
-
host->ops->set_uhs_signaling(host, ios->timing);
host->timing = ios->timing;
- if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
- ((ios->timing == MMC_TIMING_UHS_SDR12) ||
- (ios->timing == MMC_TIMING_UHS_SDR25) ||
- (ios->timing == MMC_TIMING_UHS_SDR50) ||
- (ios->timing == MMC_TIMING_UHS_SDR104) ||
- (ios->timing == MMC_TIMING_UHS_DDR50) ||
- (ios->timing == MMC_TIMING_MMC_DDR52))) {
+ if (sdhci_preset_needed(host, ios->timing)) {
u16 preset;
sdhci_enable_preset_value(host, true);
preset = sdhci_get_preset_value(host);
ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
preset);
+ host->drv_type = ios->drv_type;
}
/* Re-enable SD Clock */
host->ops->set_clock(host, host->clock);
} else
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
-
- /*
- * Some (ENE) controllers go apeshit on some ios operation,
- * signalling timeout and CRC errors even on CMD0. Resetting
- * it on each ios seems to solve the problem.
- */
- if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
- sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);
@@ -2421,50 +2537,53 @@ static int sdhci_get_cd(struct mmc_host *mmc)
return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
-static int sdhci_check_ro(struct sdhci_host *host)
+int sdhci_get_cd_nogpio(struct mmc_host *mmc)
{
+ struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
- int is_readonly;
+ int ret = 0;
spin_lock_irqsave(&host->lock, flags);
if (host->flags & SDHCI_DEVICE_DEAD)
- is_readonly = 0;
- else if (host->ops->get_ro)
- is_readonly = host->ops->get_ro(host);
- else if (mmc_can_gpio_ro(host->mmc))
- is_readonly = mmc_gpio_get_ro(host->mmc);
- else
- is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
- & SDHCI_WRITE_PROTECT);
+ goto out;
+ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+out:
spin_unlock_irqrestore(&host->lock, flags);
- /* This quirk needs to be replaced by a callback-function later */
- return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
- !is_readonly : is_readonly;
+ return ret;
}
+EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
-#define SAMPLE_COUNT 5
-
-static int sdhci_get_ro(struct mmc_host *mmc)
+int sdhci_get_ro(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
- int i, ro_count;
-
- if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
- return sdhci_check_ro(host);
+ bool allow_invert = false;
+ int is_readonly;
- ro_count = 0;
- for (i = 0; i < SAMPLE_COUNT; i++) {
- if (sdhci_check_ro(host)) {
- if (++ro_count > SAMPLE_COUNT / 2)
- return 1;
- }
- msleep(30);
+ if (host->flags & SDHCI_DEVICE_DEAD) {
+ is_readonly = 0;
+ } else if (host->ops->get_ro) {
+ is_readonly = host->ops->get_ro(host);
+ } else if (mmc_host_can_gpio_ro(mmc)) {
+ is_readonly = mmc_gpio_get_ro(mmc);
+ /* Do not invert twice */
+ allow_invert = !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
+ } else {
+ is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
+ & SDHCI_WRITE_PROTECT);
+ allow_invert = true;
}
- return 0;
+
+ if (is_readonly >= 0 &&
+ allow_invert &&
+ (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT))
+ is_readonly = !is_readonly;
+
+ return is_readonly;
}
+EXPORT_SYMBOL_GPL(sdhci_get_ro);
static void sdhci_hw_reset(struct mmc_host *mmc)
{
@@ -2679,8 +2798,7 @@ void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
{
sdhci_reset_tuning(host);
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
+ sdhci_reset_for(host, TUNING_ABORT);
sdhci_end_tuning(host);
@@ -2750,7 +2868,7 @@ void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
}
EXPORT_SYMBOL_GPL(sdhci_send_tuning);
-static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
+int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
int i;
@@ -2788,6 +2906,7 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
sdhci_reset_tuning(host);
return -EAGAIN;
}
+EXPORT_SYMBOL_GPL(__sdhci_execute_tuning);
int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
@@ -2858,7 +2977,7 @@ out:
}
EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
-static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
+void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
/* Host Controller v3.00 defines preset value registers */
if (host->version < SDHCI_SPEC_300)
@@ -2886,6 +3005,7 @@ static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
host->preset_enabled = enable;
}
}
+EXPORT_SYMBOL_GPL(sdhci_enable_preset_value);
static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
int err)
@@ -2948,8 +3068,7 @@ static void sdhci_card_event(struct mmc_host *mmc)
pr_err("%s: Resetting controller.\n",
mmc_hostname(mmc));
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
+ sdhci_reset_for(host, CARD_REMOVED);
sdhci_error_out_mrqs(host, -ENOMEDIUM);
}
@@ -2964,7 +3083,7 @@ static const struct mmc_host_ops sdhci_ops = {
.set_ios = sdhci_set_ios,
.get_cd = sdhci_get_cd,
.get_ro = sdhci_get_ro,
- .hw_reset = sdhci_hw_reset,
+ .card_hw_reset = sdhci_hw_reset,
.enable_sdio_irq = sdhci_enable_sdio_irq,
.ack_sdio_irq = sdhci_ack_sdio_irq,
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
@@ -2980,6 +3099,53 @@ static const struct mmc_host_ops sdhci_ops = {
* *
\*****************************************************************************/
+void sdhci_request_done_dma(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+
+ if (data && data->host_cookie == COOKIE_MAPPED) {
+ if (host->bounce_buffer) {
+ /*
+ * On reads, copy the bounced data into the
+ * sglist
+ */
+ if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
+ unsigned int length = data->bytes_xfered;
+
+ if (length > host->bounce_buffer_size) {
+ pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
+ mmc_hostname(host->mmc),
+ host->bounce_buffer_size,
+ data->bytes_xfered);
+ /* Cap it down and continue */
+ length = host->bounce_buffer_size;
+ }
+ dma_sync_single_for_cpu(mmc_dev(host->mmc),
+ host->bounce_addr,
+ host->bounce_buffer_size,
+ DMA_FROM_DEVICE);
+ sg_copy_from_buffer(data->sg,
+ data->sg_len,
+ host->bounce_buffer,
+ length);
+ } else {
+ /* No copying, just switch ownership */
+ dma_sync_single_for_cpu(mmc_dev(host->mmc),
+ host->bounce_addr,
+ host->bounce_buffer_size,
+ mmc_get_dma_dir(data));
+ }
+ } else {
+ /* Unmap the raw data */
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len,
+ mmc_get_dma_dir(data));
+ }
+ data->host_cookie = COOKIE_UNMAPPED;
+ }
+}
+EXPORT_SYMBOL_GPL(sdhci_request_done_dma);
+
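Exporting sdhci_request_done_dma() lets variant drivers that supply their own ->request_done / completion path reuse the bounce-buffer copy-back and scatterlist unmap logic instead of open-coding it. A minimal sketch of such a hook follows; the my_* name is hypothetical and not part of this patch.

/* Hypothetical variant ->request_done hook: generic DMA teardown, then
 * complete the request towards the MMC core. */
static void my_sdhci_request_done(struct sdhci_host *host,
				  struct mmc_request *mrq)
{
	/* Copy back bounce-buffer reads or unmap the scatterlist. */
	sdhci_request_done_dma(host, mrq);

	mmc_request_done(host->mmc, mrq);
}
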
static bool sdhci_request_done(struct sdhci_host *host)
{
unsigned long flags;
@@ -3020,12 +3186,7 @@ static bool sdhci_request_done(struct sdhci_host *host)
/* This is to force an update */
host->ops->set_clock(host, host->clock);
- /*
- * Spec says we should do both at the same time, but Ricoh
- * controllers do not like that.
- */
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
+ sdhci_reset_for(host, REQUEST_ERROR);
host->pending_reset = false;
}
@@ -3049,48 +3210,7 @@ static bool sdhci_request_done(struct sdhci_host *host)
sdhci_set_mrq_done(host, mrq);
}
- if (data && data->host_cookie == COOKIE_MAPPED) {
- if (host->bounce_buffer) {
- /*
- * On reads, copy the bounced data into the
- * sglist
- */
- if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
- unsigned int length = data->bytes_xfered;
-
- if (length > host->bounce_buffer_size) {
- pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
- mmc_hostname(host->mmc),
- host->bounce_buffer_size,
- data->bytes_xfered);
- /* Cap it down and continue */
- length = host->bounce_buffer_size;
- }
- dma_sync_single_for_cpu(
- mmc_dev(host->mmc),
- host->bounce_addr,
- host->bounce_buffer_size,
- DMA_FROM_DEVICE);
- sg_copy_from_buffer(data->sg,
- data->sg_len,
- host->bounce_buffer,
- length);
- } else {
- /* No copying, just switch ownership */
- dma_sync_single_for_cpu(
- mmc_dev(host->mmc),
- host->bounce_addr,
- host->bounce_buffer_size,
- mmc_get_dma_dir(data));
- }
- } else {
- /* Unmap the raw data */
- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len,
- mmc_get_dma_dir(data));
- }
- data->host_cookie = COOKIE_UNMAPPED;
- }
+ sdhci_request_done_dma(host, mrq);
}
host->mrqs_done[i] = NULL;
@@ -3105,7 +3225,7 @@ static bool sdhci_request_done(struct sdhci_host *host)
return false;
}
-static void sdhci_complete_work(struct work_struct *work)
+void sdhci_complete_work(struct work_struct *work)
{
struct sdhci_host *host = container_of(work, struct sdhci_host,
complete_work);
@@ -3113,19 +3233,21 @@ static void sdhci_complete_work(struct work_struct *work)
while (!sdhci_request_done(host))
;
}
+EXPORT_SYMBOL_GPL(sdhci_complete_work);
static void sdhci_timeout_timer(struct timer_list *t)
{
struct sdhci_host *host;
unsigned long flags;
- host = from_timer(host, t, timer);
+ host = timer_container_of(host, t, timer);
spin_lock_irqsave(&host->lock, flags);
if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, REQ_TIMEOUT);
sdhci_dumpregs(host);
host->cmd->error = -ETIMEDOUT;
@@ -3140,7 +3262,7 @@ static void sdhci_timeout_data_timer(struct timer_list *t)
struct sdhci_host *host;
unsigned long flags;
- host = from_timer(host, t, data_timer);
+ host = timer_container_of(host, t, data_timer);
spin_lock_irqsave(&host->lock, flags);
@@ -3148,6 +3270,7 @@ static void sdhci_timeout_data_timer(struct timer_list *t)
(host->cmd && sdhci_data_line_cmd(host->cmd))) {
pr_err("%s: Timeout waiting for hardware interrupt.\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, REQ_TIMEOUT);
sdhci_dumpregs(host);
if (host->data) {
@@ -3199,17 +3322,21 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
return;
pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
+ sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
sdhci_dumpregs(host);
return;
}
if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
- if (intmask & SDHCI_INT_TIMEOUT)
+ if (intmask & SDHCI_INT_TIMEOUT) {
host->cmd->error = -ETIMEDOUT;
- else
+ sdhci_err_stats_inc(host, CMD_TIMEOUT);
+ } else {
host->cmd->error = -EILSEQ;
-
+ if (!mmc_op_tuning(host->cmd->opcode))
+ sdhci_err_stats_inc(host, CMD_CRC);
+ }
/* Treat data command CRC error the same as data CRC error */
if (host->cmd->data &&
(intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
@@ -3231,7 +3358,9 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
-ETIMEDOUT :
-EILSEQ;
- if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
+ sdhci_err_stats_inc(host, AUTO_CMD);
+
+ if (sdhci_auto_cmd23(host, mrq)) {
mrq->sbc->error = err;
__sdhci_finish_mrq(host, mrq);
return;
@@ -3276,13 +3405,15 @@ static void sdhci_adma_show_error(struct sdhci_host *host)
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
- u32 command;
-
- /* CMD19 generates _only_ Buffer Read Ready interrupt */
- if (intmask & SDHCI_INT_DATA_AVAIL) {
- command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
- if (command == MMC_SEND_TUNING_BLOCK ||
- command == MMC_SEND_TUNING_BLOCK_HS200) {
+ /*
+	 * CMD19 generates _only_ a Buffer Read Ready interrupt when it is
+	 * issued through sdhci_send_tuning().
+	 * The case of PIO mode together with mmc_send_tuning() must be
+	 * excluded: there sdhci_transfer_pio() is never called, so
+	 * SDHCI_INT_DATA_AVAIL stays asserted and the handler is stuck in
+	 * an interrupt storm.
+ */
+ if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) {
+ if (mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) {
host->tuning_done = 1;
wake_up(&host->buf_ready_int);
return;
@@ -3301,6 +3432,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
if (intmask & SDHCI_INT_DATA_TIMEOUT) {
host->data_cmd = NULL;
data_cmd->error = -ETIMEDOUT;
+ sdhci_err_stats_inc(host, CMD_TIMEOUT);
__sdhci_finish_mrq(host, data_cmd->mrq);
return;
}
@@ -3329,23 +3461,36 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
+ sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
sdhci_dumpregs(host);
return;
}
- if (intmask & SDHCI_INT_DATA_TIMEOUT)
+ if (intmask & SDHCI_INT_DATA_TIMEOUT) {
host->data->error = -ETIMEDOUT;
- else if (intmask & SDHCI_INT_DATA_END_BIT)
+ sdhci_err_stats_inc(host, DAT_TIMEOUT);
+ } else if (intmask & SDHCI_INT_DATA_END_BIT) {
host->data->error = -EILSEQ;
- else if ((intmask & SDHCI_INT_DATA_CRC) &&
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ sdhci_err_stats_inc(host, DAT_CRC);
+ } else if ((intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) &&
SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
- != MMC_BUS_TEST_R)
+ != MMC_BUS_TEST_R) {
host->data->error = -EILSEQ;
- else if (intmask & SDHCI_INT_ADMA_ERROR) {
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ sdhci_err_stats_inc(host, DAT_CRC);
+ if (intmask & SDHCI_INT_TUNING_ERROR) {
+ u16 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+ ctrl2 &= ~SDHCI_CTRL_TUNED_CLK;
+ sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
+ }
+ } else if (intmask & SDHCI_INT_ADMA_ERROR) {
pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
intmask);
sdhci_adma_show_error(host);
+ sdhci_err_stats_inc(host, ADMA);
host->data->error = -EIO;
if (host->ops->adma_workaround)
host->ops->adma_workaround(host, intmask);
@@ -3543,13 +3688,14 @@ out:
if (unexpected) {
pr_err("%s: Unexpected interrupt 0x%08x.\n",
mmc_hostname(host->mmc), unexpected);
+ sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
sdhci_dumpregs(host);
}
return result;
}
-static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
+irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
struct sdhci_host *host = dev_id;
struct mmc_command *cmd;
@@ -3579,6 +3725,7 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(sdhci_thread_irq);
/*****************************************************************************\
* *
@@ -3592,7 +3739,7 @@ static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
{
return mmc_card_is_removable(host->mmc) &&
!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
- !mmc_can_gpio_cd(host->mmc);
+ !mmc_host_can_gpio_cd(host->mmc);
}
/*
@@ -3603,7 +3750,7 @@ static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
* sdhci_disable_irq_wakeups() since it will be set by
* sdhci_enable_card_detection() or sdhci_init().
*/
-static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
+bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
SDHCI_WAKE_ON_INT;
@@ -3635,8 +3782,9 @@ static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
return host->irq_wake_enabled;
}
+EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
-static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
+void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
u8 val;
u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
@@ -3650,6 +3798,7 @@ static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
host->irq_wake_enabled = false;
}
+EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
int sdhci_suspend_host(struct sdhci_host *host)
{
@@ -3686,6 +3835,7 @@ int sdhci_resume_host(struct sdhci_host *host)
sdhci_init(host, 0);
host->pwr = 0;
host->clock = 0;
+ host->reinit_uhs = true;
mmc->ops->set_ios(mmc, &mmc->ios);
} else {
sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER));
@@ -3708,7 +3858,7 @@ int sdhci_resume_host(struct sdhci_host *host)
EXPORT_SYMBOL_GPL(sdhci_resume_host);
-int sdhci_runtime_suspend_host(struct sdhci_host *host)
+void sdhci_runtime_suspend_host(struct sdhci_host *host)
{
unsigned long flags;
@@ -3725,12 +3875,10 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host)
spin_lock_irqsave(&host->lock, flags);
host->runtime_suspended = true;
spin_unlock_irqrestore(&host->lock, flags);
-
- return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
-int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
+void sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
{
struct mmc_host *mmc = host->mmc;
unsigned long flags;
@@ -3748,6 +3896,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
/* Force clock and power re-program */
host->pwr = 0;
host->clock = 0;
+ host->reinit_uhs = true;
mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
mmc->ops->set_ios(mmc, &mmc->ios);
@@ -3775,8 +3924,6 @@ int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
sdhci_enable_card_detection(host);
spin_unlock_irqrestore(&host->lock, flags);
-
- return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
@@ -3843,10 +3990,8 @@ void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
host->cqe_on = false;
- if (recovery) {
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
- }
+ if (recovery)
+ sdhci_reset_for(host, CQE_RECOVERY);
pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
mmc_hostname(mmc), host->ier,
@@ -3864,20 +4009,27 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
if (!host->cqe_on)
return false;
- if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
+ if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) {
*cmd_error = -EILSEQ;
- else if (intmask & SDHCI_INT_TIMEOUT)
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ sdhci_err_stats_inc(host, CMD_CRC);
+ } else if (intmask & SDHCI_INT_TIMEOUT) {
*cmd_error = -ETIMEDOUT;
- else
+ sdhci_err_stats_inc(host, CMD_TIMEOUT);
+ } else
*cmd_error = 0;
- if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
+ if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) {
*data_error = -EILSEQ;
- else if (intmask & SDHCI_INT_DATA_TIMEOUT)
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ sdhci_err_stats_inc(host, DAT_CRC);
+ } else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
*data_error = -ETIMEDOUT;
- else if (intmask & SDHCI_INT_ADMA_ERROR)
+ sdhci_err_stats_inc(host, DAT_TIMEOUT);
+ } else if (intmask & SDHCI_INT_ADMA_ERROR) {
*data_error = -EIO;
- else
+ sdhci_err_stats_inc(host, ADMA);
+ } else
*data_error = 0;
/* Clear selected interrupts. */
@@ -3893,6 +4045,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
sdhci_writel(host, intmask, SDHCI_INT_STATUS);
pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
mmc_hostname(host->mmc), intmask);
+ sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
sdhci_dumpregs(host);
}
@@ -3914,7 +4067,7 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
WARN_ON(dev == NULL);
- mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
+ mmc = devm_mmc_alloc_host(dev, sizeof(struct sdhci_host) + priv_size);
if (!mmc)
return ERR_PTR(-ENOMEM);
@@ -3939,6 +4092,12 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
* descriptor for each segment, plus 1 for a nop end descriptor.
*/
host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
+ host->max_adma = 65536;
+
+ host->max_timeout_count = 0xE;
+
+ host->complete_work_fn = sdhci_complete_work;
+ host->thread_irq_fn = sdhci_thread_irq;
return host;
}
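The complete_work_fn and thread_irq_fn pointers default to the core handlers here, and __sdhci_add_host() is what passes them to INIT_WORK() and request_threaded_irq(), so a variant driver may override them after sdhci_alloc_host()/sdhci_pltfm_init() and before __sdhci_add_host(). A rough, hypothetical sketch of such an override (the my_* names are illustrative only):

/* Hypothetical variant handler: vendor-specific work first, then fall
 * back to the core threaded IRQ handler. */
static irqreturn_t my_thread_irq(int irq, void *dev_id)
{
	/* ... vendor-specific handling of deferred interrupts ... */
	return sdhci_thread_irq(irq, dev_id);
}

/* In the variant driver's probe, before __sdhci_add_host(): */
	host->complete_work_fn = sdhci_complete_work;	/* keep the default */
	host->thread_irq_fn = my_thread_irq;
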
@@ -3993,7 +4152,7 @@ void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
if (debug_quirks2)
host->quirks2 = debug_quirks2;
- sdhci_do_reset(host, SDHCI_RESET_ALL);
+ sdhci_reset_for_all(host);
if (host->v4_mode)
sdhci_do_enable_v4_mode(host);
@@ -4006,9 +4165,6 @@ void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
- if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
- return;
-
if (caps) {
host->caps = *caps;
} else {
@@ -4602,10 +4758,27 @@ int sdhci_setup_host(struct sdhci_host *host)
* be larger than 64 KiB though.
*/
if (host->flags & SDHCI_USE_ADMA) {
- if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
+ if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
+ host->max_adma = 65532; /* 32-bit alignment */
mmc->max_seg_size = 65535;
- else
+ /*
+ * sdhci_adma_table_pre() expects to define 1 DMA
+ * descriptor per segment, so the maximum segment size
+ * is set accordingly. SDHCI allows up to 64KiB per DMA
+ * descriptor (16-bit field), but some controllers do
+			 * not support "zero means 65536", reducing the maximum
+			 * for them to 65535. That is a problem if PAGE_SIZE is
+			 * 64KiB, because the block layer does not support
+			 * max_seg_size < PAGE_SIZE; however,
+			 * sdhci_adma_table_pre() has a workaround that handles
+			 * this case by splitting the descriptor. Refer also to
+			 * the comment in sdhci_adma_table_pre().
+ */
+ if (mmc->max_seg_size < PAGE_SIZE)
+ mmc->max_seg_size = PAGE_SIZE;
+ } else {
mmc->max_seg_size = 65536;
+ }
} else {
mmc->max_seg_size = mmc->max_req_size;
}
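In this configuration a 64KiB page still yields segments of PAGE_SIZE while each ADMA2 descriptor can carry at most host->max_adma (65532) bytes, so sdhci_adma_table_pre() splits such a segment over several descriptors. The arithmetic below is purely illustrative and not the in-tree loop:

/* Illustrative only: descriptors needed for one segment when the
 * zero-length-descriptor quirk caps max_adma at 65532 bytes. */
static unsigned int descs_for_segment(unsigned int seg_len,
				      unsigned int max_adma)
{
	return DIV_ROUND_UP(seg_len, max_adma);	/* 65536 -> 2 descriptors */
}
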
@@ -4690,7 +4863,7 @@ int __sdhci_add_host(struct sdhci_host *host)
if (!host->complete_wq)
return -ENOMEM;
- INIT_WORK(&host->complete_work, sdhci_complete_work);
+ INIT_WORK(&host->complete_work, host->complete_work_fn);
timer_setup(&host->timer, sdhci_timeout_timer, 0);
timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
@@ -4699,7 +4872,7 @@ int __sdhci_add_host(struct sdhci_host *host)
sdhci_init(host, 0);
- ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
+ ret = request_threaded_irq(host->irq, sdhci_irq, host->thread_irq_fn,
IRQF_SHARED, mmc_hostname(mmc), host);
if (ret) {
pr_err("%s: Failed to request IRQ %d: %d\n",
@@ -4732,7 +4905,7 @@ int __sdhci_add_host(struct sdhci_host *host)
unled:
sdhci_led_unregister(host);
unirq:
- sdhci_do_reset(host, SDHCI_RESET_ALL);
+ sdhci_reset_for_all(host);
sdhci_writel(host, 0, SDHCI_INT_ENABLE);
sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
free_irq(host->irq, host);
@@ -4790,14 +4963,14 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
sdhci_led_unregister(host);
if (!dead)
- sdhci_do_reset(host, SDHCI_RESET_ALL);
+ sdhci_reset_for_all(host);
sdhci_writel(host, 0, SDHCI_INT_ENABLE);
sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
free_irq(host->irq, host);
- del_timer_sync(&host->timer);
- del_timer_sync(&host->data_timer);
+ timer_delete_sync(&host->timer);
+ timer_delete_sync(&host->data_timer);
destroy_workqueue(host->complete_wq);
@@ -4818,13 +4991,6 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
EXPORT_SYMBOL_GPL(sdhci_remove_host);
-void sdhci_free_host(struct sdhci_host *host)
-{
- mmc_free_host(host->mmc);
-}
-
-EXPORT_SYMBOL_GPL(sdhci_free_host);
-
/*****************************************************************************\
* *
* Driver init/exit *
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 074dc182b184..b6a571d866fa 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -43,8 +43,23 @@
#define SDHCI_TRNS_READ 0x10
#define SDHCI_TRNS_MULTI 0x20
+/*
+ * Defined in Host Version 4.0.
+ */
+#define SDHCI_TRNS_RES_TYPE 0x40
+#define SDHCI_TRNS_RES_ERR_CHECK 0x80
+#define SDHCI_TRNS_RES_INT_DIS 0x0100
+
#define SDHCI_COMMAND 0x0E
#define SDHCI_CMD_RESP_MASK 0x03
+
+/*
+ * Host Version 4.10 adds this bit to distinguish a main command from a
+ * sub command.
+ * For example, with SDIO, a CMD52 (sub command) may be issued during a CMD53 (main command).
+ */
+#define SDHCI_CMD_SUB_CMD 0x04
+
#define SDHCI_CMD_CRC 0x08
#define SDHCI_CMD_INDEX 0x10
#define SDHCI_CMD_DATA 0x20
@@ -65,6 +80,9 @@
#define SDHCI_PRESENT_STATE 0x24
#define SDHCI_CMD_INHIBIT 0x00000001
#define SDHCI_DATA_INHIBIT 0x00000002
+
+#define SDHCI_DAT_4_TO_7_LVL_MASK 0x000000F0
+
#define SDHCI_DOING_WRITE 0x00000100
#define SDHCI_DOING_READ 0x00000200
#define SDHCI_SPACE_AVAILABLE 0x00000400
@@ -80,6 +98,15 @@
#define SDHCI_DATA_0_LVL_MASK 0x00100000
#define SDHCI_CMD_LVL 0x01000000
+/* Host Version 4.10 */
+
+#define SDHCI_HOST_REGULATOR_STABLE 0x02000000
+#define SDHCI_CMD_NOT_ISSUED_ERR 0x08000000
+#define SDHCI_SUB_CMD_STATUS 0x10000000
+#define SDHCI_UHS2_IN_DORMANT_STATE 0x20000000
+#define SDHCI_UHS2_LANE_SYNC 0x40000000
+#define SDHCI_UHS2_IF_DETECT 0x80000000
+
#define SDHCI_HOST_CONTROL 0x28
#define SDHCI_CTRL_LED 0x01
#define SDHCI_CTRL_4BITBUS 0x02
@@ -99,6 +126,13 @@
#define SDHCI_POWER_180 0x0A
#define SDHCI_POWER_300 0x0C
#define SDHCI_POWER_330 0x0E
+/*
+ * VDD2 - UHS2 or PCIe/NVMe
+ * VDD2 power on/off and voltage select
+ */
+#define SDHCI_VDD2_POWER_ON 0x10
+#define SDHCI_VDD2_POWER_120 0x80
+#define SDHCI_VDD2_POWER_180 0xA0
#define SDHCI_BLOCK_GAP_CONTROL 0x2A
@@ -110,7 +144,7 @@
#define SDHCI_CLOCK_CONTROL 0x2C
#define SDHCI_DIVIDER_SHIFT 8
#define SDHCI_DIVIDER_HI_SHIFT 6
-#define SDHCI_DIV_MASK 0xFF
+#define SDHCI_DIV_MASK 0xFF
#define SDHCI_DIV_MASK_LEN 8
#define SDHCI_DIV_HI_MASK 0x300
#define SDHCI_PROG_CLOCK_MODE 0x0020
@@ -139,6 +173,10 @@
#define SDHCI_INT_CARD_REMOVE 0x00000080
#define SDHCI_INT_CARD_INT 0x00000100
#define SDHCI_INT_RETUNE 0x00001000
+
+/* Host Version 4.10 */
+#define SDHCI_INT_FX_EVENT 0x00002000
+
#define SDHCI_INT_CQE 0x00004000
#define SDHCI_INT_ERROR 0x00008000
#define SDHCI_INT_TIMEOUT 0x00010000
@@ -151,6 +189,10 @@
#define SDHCI_INT_BUS_POWER 0x00800000
#define SDHCI_INT_AUTO_CMD_ERR 0x01000000
#define SDHCI_INT_ADMA_ERROR 0x02000000
+#define SDHCI_INT_TUNING_ERROR 0x04000000
+
+/* Host Version 4.0 */
+#define SDHCI_INT_RESP_ERR 0x08000000
#define SDHCI_INT_NORMAL_MASK 0x00007FFF
#define SDHCI_INT_ERROR_MASK 0xFFFF8000
@@ -162,7 +204,7 @@
SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \
- SDHCI_INT_BLK_GAP)
+ SDHCI_INT_BLK_GAP | SDHCI_INT_TUNING_ERROR)
#define SDHCI_INT_ALL_MASK ((unsigned int)-1)
#define SDHCI_CQE_INT_ERR_MASK ( \
@@ -178,6 +220,9 @@
#define SDHCI_AUTO_CMD_END_BIT 0x00000008
#define SDHCI_AUTO_CMD_INDEX 0x00000010
+/* Host Version 4.10 */
+#define SDHCI_AUTO_CMD_RESP_ERR 0x0020
+
#define SDHCI_HOST_CONTROL2 0x3E
#define SDHCI_CTRL_UHS_MASK 0x0007
#define SDHCI_CTRL_UHS_SDR12 0x0000
@@ -186,6 +231,7 @@
#define SDHCI_CTRL_UHS_SDR104 0x0003
#define SDHCI_CTRL_UHS_DDR50 0x0004
#define SDHCI_CTRL_HS400 0x0005 /* Non-standard */
+#define SDHCI_CTRL_UHS2 0x0007
#define SDHCI_CTRL_VDD_180 0x0008
#define SDHCI_CTRL_DRV_TYPE_MASK 0x0030
#define SDHCI_CTRL_DRV_TYPE_B 0x0000
@@ -194,9 +240,12 @@
#define SDHCI_CTRL_DRV_TYPE_D 0x0030
#define SDHCI_CTRL_EXEC_TUNING 0x0040
#define SDHCI_CTRL_TUNED_CLK 0x0080
+#define SDHCI_CTRL_UHS2_ENABLE 0x0100
+#define SDHCI_CTRL_ADMA2_LEN_MODE 0x0400
#define SDHCI_CMD23_ENABLE 0x0800
#define SDHCI_CTRL_V4_MODE 0x1000
#define SDHCI_CTRL_64BIT_ADDR 0x2000
+#define SDHCI_CTRL_ASYNC_INT_ENABLE 0x4000
#define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
#define SDHCI_CAPABILITIES 0x40
@@ -219,11 +268,13 @@
#define SDHCI_CAN_VDD_180 0x04000000
#define SDHCI_CAN_64BIT_V4 0x08000000
#define SDHCI_CAN_64BIT 0x10000000
+#define SDHCI_CAN_ASYNC_INT 0x20000000
#define SDHCI_CAPABILITIES_1 0x44
#define SDHCI_SUPPORT_SDR50 0x00000001
#define SDHCI_SUPPORT_SDR104 0x00000002
#define SDHCI_SUPPORT_DDR50 0x00000004
+#define SDHCI_SUPPORT_UHS2 0x00000008
#define SDHCI_DRIVER_TYPE_A 0x00000010
#define SDHCI_DRIVER_TYPE_C 0x00000020
#define SDHCI_DRIVER_TYPE_D 0x00000040
@@ -232,6 +283,7 @@
#define SDHCI_RETUNING_MODE_MASK GENMASK(15, 14)
#define SDHCI_CLOCK_MUL_MASK GENMASK(23, 16)
#define SDHCI_CAN_DO_ADMA3 0x08000000
+#define SDHCI_CAN_VDD2_180 0x10000000 /* UHS-2 1.8V VDD2 */
#define SDHCI_SUPPORT_HS400 0x80000000 /* Non-standard */
#define SDHCI_MAX_CURRENT 0x48
@@ -239,11 +291,14 @@
#define SDHCI_MAX_CURRENT_330_MASK GENMASK(7, 0)
#define SDHCI_MAX_CURRENT_300_MASK GENMASK(15, 8)
#define SDHCI_MAX_CURRENT_180_MASK GENMASK(23, 16)
+#define SDHCI_MAX_CURRENT_1 0x4C
+#define SDHCI_MAX_CURRENT_VDD2_180_MASK GENMASK(7, 0) /* UHS2 */
#define SDHCI_MAX_CURRENT_MULTIPLIER 4
/* 4C-4F reserved for more max current */
#define SDHCI_SET_ACMD12_ERROR 0x50
+/* Host Version 4.10 */
#define SDHCI_SET_INT_ERROR 0x52
#define SDHCI_ADMA_ERROR 0x54
@@ -262,10 +317,15 @@
#define SDHCI_PRESET_FOR_SDR104 0x6C
#define SDHCI_PRESET_FOR_DDR50 0x6E
#define SDHCI_PRESET_FOR_HS400 0x74 /* Non-standard */
+
+/* UHS2 */
+#define SDHCI_PRESET_FOR_UHS2 0x74
#define SDHCI_PRESET_DRV_MASK GENMASK(15, 14)
#define SDHCI_PRESET_CLKGEN_SEL BIT(10)
#define SDHCI_PRESET_SDCLK_FREQ_MASK GENMASK(9, 0)
+#define SDHCI_ADMA3_ADDRESS 0x78
+
#define SDHCI_SLOT_INT_STATUS 0xFC
#define SDHCI_HOST_VERSION 0xFE
@@ -340,11 +400,12 @@ struct sdhci_adma2_64_desc {
/*
* Maximum segments assuming a 512KiB maximum requisition size and a minimum
- * 4KiB page size.
+ * 4KiB page size. Note this also allows enough for multiple descriptors in
+ * case of PAGE_SIZE >= 64KiB.
*/
#define SDHCI_MAX_SEGS 128
-/* Allow for a a command request and a data request at the same time */
+/* Allow for a command request and a data request at the same time */
#define SDHCI_MAX_MRQS 2
/*
@@ -355,6 +416,9 @@ struct sdhci_adma2_64_desc {
*/
#define MMC_CMD_TRANSFER_TIME (10 * NSEC_PER_MSEC) /* max 10 ms */
+#define sdhci_err_stats_inc(host, err_name) \
+ mmc_debugfs_err_stats_inc((host)->mmc, MMC_ERR_##err_name)
+
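The helper keeps error paths short: the second argument is just the suffix of the MMC_ERR_* enumerator tracked by the core debugfs error statistics. For instance, as used throughout this patch:

/* Expands to mmc_debugfs_err_stats_inc(host->mmc, MMC_ERR_CMD_CRC) */
sdhci_err_stats_inc(host, CMD_CRC);
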
enum sdhci_cookie {
COOKIE_UNMAPPED,
COOKIE_PRE_MAPPED, /* mapped by sdhci_pre_req() */
@@ -375,8 +439,6 @@ struct sdhci_host {
#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
/* Controller doesn't like clearing the power reg before a change */
#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
-/* Controller has flaky internal state so reset it on each ios change */
-#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
/* Controller has an unusable DMA engine */
#define SDHCI_QUIRK_BROKEN_DMA (1<<5)
/* Controller has an unusable ADMA engine */
@@ -421,16 +483,12 @@ struct sdhci_host {
#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25)
/* Controller cannot support End Attribute in NOP ADMA descriptor */
#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26)
-/* Controller is missing device caps. Use caps provided by host */
-#define SDHCI_QUIRK_MISSING_CAPS (1<<27)
/* Controller uses Auto CMD12 command to stop the transfer */
#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
/* Controller treats ADMA descriptors with length 0000h incorrectly */
#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1<<30)
-/* The read-only detection via SDHCI_PRESENT_STATE register is unstable */
-#define SDHCI_QUIRK_UNSTABLE_RO_DETECT (1<<31)
unsigned int quirks2; /* More deviations from spec. */
@@ -476,6 +534,8 @@ struct sdhci_host {
* block count.
*/
#define SDHCI_QUIRK2_USE_32BIT_BLK_CNT (1<<18)
+/* Issue CMD and DATA reset together */
+#define SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER (1<<19)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
@@ -517,10 +577,13 @@ struct sdhci_host {
unsigned int max_clk; /* Max possible freq (MHz) */
unsigned int timeout_clk; /* Timeout freq (KHz) */
+ u8 max_timeout_count; /* Vendor specific max timeout count */
	unsigned int clk_mul;	/* Clock Multiplier value */
unsigned int clock; /* Current clock (MHz) */
u8 pwr; /* Current voltage */
+ u8 drv_type; /* Current UHS-I driver type */
+ bool reinit_uhs; /* Force UHS-related re-initialization */
bool runtime_suspended; /* Host is runtime suspended */
bool bus_on; /* Bus power prevents runtime suspend */
@@ -542,6 +605,7 @@ struct sdhci_host {
unsigned int blocks; /* remaining PIO blocks */
int sg_count; /* Mapped sg entries */
+ int max_adma; /* Max. length in ADMA descriptor */
void *adma_table; /* ADMA descriptor table */
void *align_buffer; /* Bounce buffer */
@@ -561,6 +625,9 @@ struct sdhci_host {
struct timer_list timer; /* Timer for timeouts */
struct timer_list data_timer; /* Timer for data timeouts */
+ void (*complete_work_fn)(struct work_struct *work);
+ irqreturn_t (*thread_irq_fn)(int irq, void *dev_id);
+
#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
struct dma_chan *rx_chan;
struct dma_chan *tx_chan;
@@ -655,6 +722,8 @@ struct sdhci_ops {
void (*request_done)(struct sdhci_host *host,
struct mmc_request *mrq);
void (*dump_vendor_regs)(struct sdhci_host *host);
+ void (*dump_uhs2_regs)(struct sdhci_host *host);
+ void (*uhs2_pre_detect_init)(struct sdhci_host *host);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -742,14 +811,12 @@ static inline u8 sdhci_readb(struct sdhci_host *host, int reg)
#endif /* CONFIG_MMC_SDHCI_IO_ACCESSORS */
struct sdhci_host *sdhci_alloc_host(struct device *dev, size_t priv_size);
-void sdhci_free_host(struct sdhci_host *host);
static inline void *sdhci_priv(struct sdhci_host *host)
{
return host->private;
}
-void sdhci_card_detect(struct sdhci_host *host);
void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
const u32 *caps, const u32 *caps1);
int sdhci_setup_host(struct sdhci_host *host);
@@ -763,6 +830,15 @@ static inline void sdhci_read_caps(struct sdhci_host *host)
__sdhci_read_caps(host, NULL, NULL, NULL);
}
+bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq);
+bool sdhci_data_line_cmd(struct mmc_command *cmd);
+void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq, unsigned long timeout);
+void sdhci_initialize_data(struct sdhci_host *host, struct mmc_data *data);
+void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data);
+void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq);
+void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq);
+void __sdhci_finish_data_common(struct sdhci_host *host, bool defer_reset);
+bool sdhci_present_error(struct sdhci_host *host, struct mmc_command *cmd, bool present);
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
unsigned int *actual_clock);
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
@@ -772,26 +848,45 @@ void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
unsigned char mode,
unsigned short vdd);
+unsigned short sdhci_get_vdd_value(unsigned short vdd);
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
+int sdhci_get_cd_nogpio(struct mmc_host *mmc);
+int sdhci_get_ro(struct mmc_host *mmc);
void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq);
int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq);
void sdhci_set_bus_width(struct sdhci_host *host, int width);
void sdhci_reset(struct sdhci_host *host, u8 mask);
+bool sdhci_do_reset(struct sdhci_host *host, u8 mask);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
+int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode);
+void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
+void sdhci_set_ios_common(struct mmc_host *mmc, struct mmc_ios *ios);
void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios);
void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable);
+void sdhci_request_done_dma(struct sdhci_host *host, struct mmc_request *mrq);
+void sdhci_complete_work(struct work_struct *work);
+irqreturn_t sdhci_thread_irq(int irq, void *dev_id);
void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
dma_addr_t addr, int len, unsigned int cmd);
#ifdef CONFIG_PM
+bool sdhci_enable_irq_wakeups(struct sdhci_host *host);
+void sdhci_disable_irq_wakeups(struct sdhci_host *host);
int sdhci_suspend_host(struct sdhci_host *host);
int sdhci_resume_host(struct sdhci_host *host);
-int sdhci_runtime_suspend_host(struct sdhci_host *host);
-int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset);
+void sdhci_runtime_suspend_host(struct sdhci_host *host);
+void sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset);
+#else
+static inline bool sdhci_enable_irq_wakeups(struct sdhci_host *host) { return false; }
+static inline void sdhci_disable_irq_wakeups(struct sdhci_host *host) {}
+static inline int sdhci_suspend_host(struct sdhci_host *host) { return -EOPNOTSUPP; }
+static inline int sdhci_resume_host(struct sdhci_host *host) { return -EOPNOTSUPP; }
+static inline void sdhci_runtime_suspend_host(struct sdhci_host *host) {}
+static inline void sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset) {}
#endif
void sdhci_cqe_enable(struct mmc_host *mmc);
@@ -811,4 +906,20 @@ void sdhci_switch_external_dma(struct sdhci_host *host, bool en);
void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable);
void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd);
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
+#define SDHCI_DBG_ANYWAY 0
+#elif defined(DEBUG)
+#define SDHCI_DBG_ANYWAY 1
+#else
+#define SDHCI_DBG_ANYWAY 0
+#endif
+
+#define sdhci_dbg_dumpregs(host, fmt) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor) || SDHCI_DBG_ANYWAY) \
+ sdhci_dumpregs(host); \
+} while (0)
+
#endif /* __SDHCI_HW_H */
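sdhci_dbg_dumpregs() gates the fairly verbose register dump behind dynamic debug, or behind DEBUG when dynamic debug is unavailable, so it can sit in frequently taken paths. A hedged usage sketch (the message text is illustrative):

/* Dumps registers only when this callsite is enabled via dynamic debug
 * (or unconditionally when built with DEBUG). */
sdhci_dbg_dumpregs(host, "request timed out\n");
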
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index f654afbe8e83..d235b0aecfdb 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -15,6 +15,7 @@
#include <linux/sys_soc.h>
#include "cqhci.h"
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
/* CTL_CFG Registers */
@@ -84,15 +85,16 @@
#define DRIVER_STRENGTH_40_OHM 0x4
#define CLOCK_TOO_SLOW_HZ 50000000
+#define SDHCI_AM654_AUTOSUSPEND_DELAY -1
+#define RETRY_TUNING_MAX 10
/* Command Queue Host Controller Interface Base address */
#define SDHCI_AM654_CQE_BASE_ADDR 0x200
-static struct regmap_config sdhci_am654_regmap_config = {
+static const struct regmap_config sdhci_am654_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
};
struct timing_data {
@@ -139,19 +141,33 @@ static const struct timing_data td[] = {
struct sdhci_am654_data {
struct regmap *base;
- bool legacy_otapdly;
- int otap_del_sel[ARRAY_SIZE(td)];
- int itap_del_sel[ARRAY_SIZE(td)];
+ u32 otap_del_sel[ARRAY_SIZE(td)];
+ u32 itap_del_sel[ARRAY_SIZE(td)];
+ u32 itap_del_ena[ARRAY_SIZE(td)];
int clkbuf_sel;
int trm_icp;
int drv_strength;
int strb_sel;
u32 flags;
+ u32 quirks;
+ bool dll_enable;
+ u32 tuning_loop;
+
+#define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
+#define SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA BIT(1)
+#define SDHCI_AM654_QUIRK_DISABLE_HS400 BIT(2)
+};
+
+struct window {
+ u8 start;
+ u8 end;
+ u8 length;
};
struct sdhci_am654_driver_data {
const struct sdhci_pltfm_data *pdata;
u32 flags;
+ u32 quirks;
#define IOMUX_PRESENT (1 << 0)
#define FREQSEL_2_BIT (1 << 1)
#define STRBSEL_4_BIT (1 << 2)
@@ -228,11 +244,13 @@ static void sdhci_am654_setup_dll(struct sdhci_host *host, unsigned int clock)
}
static void sdhci_am654_write_itapdly(struct sdhci_am654_data *sdhci_am654,
- u32 itapdly)
+ u32 itapdly, u32 enable)
{
/* Set ITAPCHGWIN before writing to ITAPDLY */
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK,
1 << ITAPCHGWIN_SHIFT);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYENA_MASK,
+ enable << ITAPDLYENA_SHIFT);
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYSEL_MASK,
itapdly << ITAPDLYSEL_SHIFT);
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0);
@@ -249,8 +267,8 @@ static void sdhci_am654_setup_delay_chain(struct sdhci_am654_data *sdhci_am654,
mask = SELDLYTXCLK_MASK | SELDLYRXCLK_MASK;
regmap_update_bits(sdhci_am654->base, PHY_CTRL5, mask, val);
- sdhci_am654_write_itapdly(sdhci_am654,
- sdhci_am654->itap_del_sel[timing]);
+ sdhci_am654_write_itapdly(sdhci_am654, sdhci_am654->itap_del_sel[timing],
+ sdhci_am654->itap_del_ena[timing]);
}
static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
@@ -259,23 +277,17 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
unsigned char timing = host->mmc->ios.timing;
u32 otap_del_sel;
- u32 otap_del_ena;
u32 mask, val;
regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 0);
sdhci_set_clock(host, clock);
- /* Setup DLL Output TAP delay */
- if (sdhci_am654->legacy_otapdly)
- otap_del_sel = sdhci_am654->otap_del_sel[0];
- else
- otap_del_sel = sdhci_am654->otap_del_sel[timing];
-
- otap_del_ena = (timing > MMC_TIMING_UHS_SDR25) ? 1 : 0;
+ /* Setup Output TAP delay */
+ otap_del_sel = sdhci_am654->otap_del_sel[timing];
mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
- val = (otap_del_ena << OTAPDLYENA_SHIFT) |
+ val = (0x1 << OTAPDLYENA_SHIFT) |
(otap_del_sel << OTAPDLYSEL_SHIFT);
/* Write to STRBSEL for HS400 speed mode */
@@ -290,10 +302,21 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
- if (timing > MMC_TIMING_UHS_SDR25 && clock >= CLOCK_TOO_SLOW_HZ)
+ if (timing > MMC_TIMING_UHS_SDR25 && clock >= CLOCK_TOO_SLOW_HZ) {
sdhci_am654_setup_dll(host, clock);
- else
+ sdhci_am654->dll_enable = true;
+
+ if (timing == MMC_TIMING_MMC_HS400) {
+ sdhci_am654->itap_del_ena[timing] = 0x1;
+ sdhci_am654->itap_del_sel[timing] = sdhci_am654->itap_del_sel[timing - 1];
+ }
+
+ sdhci_am654_write_itapdly(sdhci_am654, sdhci_am654->itap_del_sel[timing],
+ sdhci_am654->itap_del_ena[timing]);
+ } else {
sdhci_am654_setup_delay_chain(sdhci_am654, timing);
+ sdhci_am654->dll_enable = false;
+ }
regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK,
sdhci_am654->clkbuf_sel);
@@ -306,25 +329,58 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
unsigned char timing = host->mmc->ios.timing;
u32 otap_del_sel;
+ u32 itap_del_ena;
+ u32 itap_del_sel;
u32 mask, val;
- /* Setup DLL Output TAP delay */
- if (sdhci_am654->legacy_otapdly)
- otap_del_sel = sdhci_am654->otap_del_sel[0];
- else
- otap_del_sel = sdhci_am654->otap_del_sel[timing];
+ /* Setup Output TAP delay */
+ otap_del_sel = sdhci_am654->otap_del_sel[timing];
mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
val = (0x1 << OTAPDLYENA_SHIFT) |
(otap_del_sel << OTAPDLYSEL_SHIFT);
- regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
+ /* Setup Input TAP delay */
+ itap_del_ena = sdhci_am654->itap_del_ena[timing];
+ itap_del_sel = sdhci_am654->itap_del_sel[timing];
+
+ mask |= ITAPDLYENA_MASK | ITAPDLYSEL_MASK;
+ val |= (itap_del_ena << ITAPDLYENA_SHIFT) |
+ (itap_del_sel << ITAPDLYSEL_SHIFT);
+
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK,
+ 1 << ITAPCHGWIN_SHIFT);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0);
regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK,
sdhci_am654->clkbuf_sel);
sdhci_set_clock(host, clock);
}
+static int sdhci_am654_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ if ((sdhci_am654->quirks & SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA) &&
+ ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
+ if (ret < 0) {
+			pr_err("%s: Switching to 1.8V signalling voltage failed\n",
+ mmc_hostname(mmc));
+ return -EIO;
+ }
+ }
+ return 0;
+ }
+
+ return sdhci_start_signal_voltage_switch(mmc, ios);
+}
+
static u8 sdhci_am654_write_power_on(struct sdhci_host *host, u8 val, int reg)
{
writeb(val, host->ioaddr + reg);
@@ -347,8 +403,6 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
*/
case MMC_TIMING_SD_HS:
case MMC_TIMING_MMC_HS:
- case MMC_TIMING_UHS_SDR12:
- case MMC_TIMING_UHS_SDR25:
val &= ~SDHCI_CTRL_HISPD;
}
}
@@ -365,7 +419,22 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
MAX_POWER_ON_TIMEOUT, false, host, val,
reg);
if (ret)
- dev_warn(mmc_dev(host->mmc), "Power on failed\n");
+ dev_info(mmc_dev(host->mmc), "Power on failed\n");
+ }
+}
+
+static void sdhci_am654_reset(struct sdhci_host *host, u8 mask)
+{
+ u8 ctrl;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_and_cqhci_reset(host, mask);
+
+ if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_FORCE_CDTEST) {
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ ctrl |= SDHCI_CTRL_CDTEST_INS | SDHCI_CTRL_CDTEST_EN;
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
}
@@ -398,45 +467,137 @@ static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
return 0;
}
-#define ITAP_MAX 32
-static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host,
- u32 opcode)
+#define ITAPDLY_LENGTH 32
+#define ITAPDLY_LAST_INDEX (ITAPDLY_LENGTH - 1)
+
+static int sdhci_am654_calculate_itap(struct sdhci_host *host, struct window
+ *fail_window, u8 num_fails, bool circular_buffer)
+{
+ u8 itap = 0, start_fail = 0, end_fail = 0, pass_length = 0;
+ u8 first_fail_start = 0, last_fail_end = 0;
+ struct device *dev = mmc_dev(host->mmc);
+ struct window pass_window = {0, 0, 0};
+ int prev_fail_end = -1;
+ u8 i;
+
+ if (!num_fails) {
+ /* Retry tuning */
+ dev_dbg(dev, "No failing region found, retry tuning\n");
+ return -1;
+ }
+
+ if (fail_window->length == ITAPDLY_LENGTH) {
+ /* Retry tuning */
+ dev_dbg(dev, "No passing itapdly, retry tuning\n");
+ return -1;
+ }
+
+ first_fail_start = fail_window->start;
+ last_fail_end = fail_window[num_fails - 1].end;
+
+ for (i = 0; i < num_fails; i++) {
+ start_fail = fail_window[i].start;
+ end_fail = fail_window[i].end;
+ pass_length = start_fail - (prev_fail_end + 1);
+
+ if (pass_length > pass_window.length) {
+ pass_window.start = prev_fail_end + 1;
+ pass_window.length = pass_length;
+ }
+ prev_fail_end = end_fail;
+ }
+
+ if (!circular_buffer)
+ pass_length = ITAPDLY_LAST_INDEX - last_fail_end;
+ else
+ pass_length = ITAPDLY_LAST_INDEX - last_fail_end + first_fail_start;
+
+ if (pass_length > pass_window.length) {
+ pass_window.start = last_fail_end + 1;
+ pass_window.length = pass_length;
+ }
+
+ if (!circular_buffer)
+ itap = pass_window.start + (pass_window.length >> 1);
+ else
+ itap = (pass_window.start + (pass_window.length >> 1)) % ITAPDLY_LENGTH;
+
+ return (itap > ITAPDLY_LAST_INDEX) ? ITAPDLY_LAST_INDEX >> 1 : itap;
+}
+
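sdhci_am654_calculate_itap() merges the failing regions recorded during the tap sweep and returns the midpoint of the widest passing window, treating the 32-tap range as circular when the DLL is enabled. A worked example with hypothetical sweep results:

/*
 * Hypothetical sweep with the DLL enabled (circular range): taps 0-3 and
 * 28-31 fail, so fail_window = { {0, 3, 4}, {28, 31, 4} } and num_fails = 2.
 * The widest passing window is taps 4..27 (length 24), and the helper
 * returns (4 + (24 >> 1)) % 32 = 16 as the final ITAPDLY value.
 */
itapdly = sdhci_am654_calculate_itap(host, fail_window, 2, true);
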
+static int sdhci_am654_do_tuning(struct sdhci_host *host,
+ u32 opcode)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
- int cur_val, prev_val = 1, fail_len = 0, pass_window = 0, pass_len;
- u32 itap;
+ unsigned char timing = host->mmc->ios.timing;
+ struct window fail_window[ITAPDLY_LENGTH];
+ struct device *dev = mmc_dev(host->mmc);
+ u8 curr_pass, itap;
+ u8 fail_index = 0;
+ u8 prev_pass = 1;
+
+ memset(fail_window, 0, sizeof(fail_window));
/* Enable ITAPDLY */
- regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYENA_MASK,
- 1 << ITAPDLYENA_SHIFT);
+ sdhci_am654->itap_del_ena[timing] = 0x1;
+
+ for (itap = 0; itap < ITAPDLY_LENGTH; itap++) {
+ sdhci_am654_write_itapdly(sdhci_am654, itap, sdhci_am654->itap_del_ena[timing]);
- for (itap = 0; itap < ITAP_MAX; itap++) {
- sdhci_am654_write_itapdly(sdhci_am654, itap);
+ curr_pass = !mmc_send_tuning(host->mmc, opcode, NULL);
- cur_val = !mmc_send_tuning(host->mmc, opcode, NULL);
- if (cur_val && !prev_val)
- pass_window = itap;
+ if (!curr_pass && prev_pass)
+ fail_window[fail_index].start = itap;
+
+ if (!curr_pass) {
+ fail_window[fail_index].end = itap;
+ fail_window[fail_index].length++;
+ dev_dbg(dev, "Failed itapdly=%d\n", itap);
+ }
- if (!cur_val)
- fail_len++;
+ if (curr_pass && !prev_pass)
+ fail_index++;
- prev_val = cur_val;
+ prev_pass = curr_pass;
}
- /*
- * Having determined the length of the failing window and start of
- * the passing window calculate the length of the passing window and
- * set the final value halfway through it considering the range as a
- * circular buffer
- */
- pass_len = ITAP_MAX - fail_len;
- itap = (pass_window + (pass_len >> 1)) % ITAP_MAX;
- sdhci_am654_write_itapdly(sdhci_am654, itap);
+
+ if (fail_window[fail_index].length != 0)
+ fail_index++;
+
+ return sdhci_am654_calculate_itap(host, fail_window, fail_index,
+ sdhci_am654->dll_enable);
+}
+
+static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host,
+ u32 opcode)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ unsigned char timing = host->mmc->ios.timing;
+ struct device *dev = mmc_dev(host->mmc);
+ int itapdly;
+
+ do {
+ itapdly = sdhci_am654_do_tuning(host, opcode);
+ if (itapdly >= 0)
+ break;
+ } while (++sdhci_am654->tuning_loop < RETRY_TUNING_MAX);
+
+ if (itapdly < 0) {
+		dev_err(dev, "Failed to find a passing itapdly, tuning failed\n");
+ return -1;
+ }
+
+ dev_dbg(dev, "Passed tuning, final itapdly=%d\n", itapdly);
+ sdhci_am654_write_itapdly(sdhci_am654, itapdly, sdhci_am654->itap_del_ena[timing]);
+ /* Save ITAPDLY */
+ sdhci_am654->itap_del_sel[timing] = itapdly;
return 0;
}
-static struct sdhci_ops sdhci_am654_ops = {
+static const struct sdhci_ops sdhci_am654_ops = {
.platform_execute_tuning = sdhci_am654_platform_execute_tuning,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -446,13 +607,14 @@ static struct sdhci_ops sdhci_am654_ops = {
.set_clock = sdhci_am654_set_clock,
.write_b = sdhci_am654_write_b,
.irq = sdhci_am654_cqhci_irq,
- .reset = sdhci_reset,
+ .reset = sdhci_and_cqhci_reset,
};
static const struct sdhci_pltfm_data sdhci_am654_pdata = {
.ops = &sdhci_am654_ops,
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_DISABLE_HW_TIMEOUT,
};
static const struct sdhci_am654_driver_data sdhci_am654_sr1_drvdata = {
@@ -466,7 +628,7 @@ static const struct sdhci_am654_driver_data sdhci_am654_drvdata = {
.flags = IOMUX_PRESENT | FREQSEL_2_BIT | STRBSEL_4_BIT | DLL_PRESENT,
};
-static struct sdhci_ops sdhci_j721e_8bit_ops = {
+static const struct sdhci_ops sdhci_j721e_8bit_ops = {
.platform_execute_tuning = sdhci_am654_platform_execute_tuning,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -476,13 +638,14 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
.set_clock = sdhci_am654_set_clock,
.write_b = sdhci_am654_write_b,
.irq = sdhci_am654_cqhci_irq,
- .reset = sdhci_reset,
+ .reset = sdhci_and_cqhci_reset,
};
static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
.ops = &sdhci_j721e_8bit_ops,
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_DISABLE_HW_TIMEOUT,
};
static const struct sdhci_am654_driver_data sdhci_j721e_8bit_drvdata = {
@@ -490,7 +653,7 @@ static const struct sdhci_am654_driver_data sdhci_j721e_8bit_drvdata = {
.flags = DLL_PRESENT | DLL_CALIB,
};
-static struct sdhci_ops sdhci_j721e_4bit_ops = {
+static const struct sdhci_ops sdhci_j721e_4bit_ops = {
.platform_execute_tuning = sdhci_am654_platform_execute_tuning,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -500,13 +663,14 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
.set_clock = sdhci_j721e_4bit_set_clock,
.write_b = sdhci_am654_write_b,
.irq = sdhci_am654_cqhci_irq,
- .reset = sdhci_reset,
+ .reset = sdhci_am654_reset,
};
static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
.ops = &sdhci_j721e_4bit_ops,
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_DISABLE_HW_TIMEOUT,
};
static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = {
@@ -514,24 +678,10 @@ static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = {
.flags = IOMUX_PRESENT,
};
-static const struct sdhci_pltfm_data sdhci_am64_8bit_pdata = {
- .ops = &sdhci_j721e_8bit_ops,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
-};
-
-static const struct sdhci_am654_driver_data sdhci_am64_8bit_drvdata = {
- .pdata = &sdhci_am64_8bit_pdata,
- .flags = DLL_PRESENT | DLL_CALIB,
-};
-
-static const struct sdhci_pltfm_data sdhci_am64_4bit_pdata = {
- .ops = &sdhci_j721e_4bit_ops,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
-};
-
-static const struct sdhci_am654_driver_data sdhci_am64_4bit_drvdata = {
- .pdata = &sdhci_am64_4bit_pdata,
+static const struct sdhci_am654_driver_data sdhci_am62_4bit_drvdata = {
+ .pdata = &sdhci_j721e_4bit_pdata,
.flags = IOMUX_PRESENT,
+ .quirks = SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA,
};
static const struct soc_device_attribute sdhci_am654_devices[] = {
@@ -556,7 +706,6 @@ static const struct cqhci_host_ops sdhci_am654_cqhci_ops = {
static int sdhci_am654_cqe_add_host(struct sdhci_host *host)
{
struct cqhci_host *cq_host;
- int ret;
cq_host = devm_kzalloc(mmc_dev(host->mmc), sizeof(struct cqhci_host),
GFP_KERNEL);
@@ -570,9 +719,7 @@ static int sdhci_am654_cqe_add_host(struct sdhci_host *host)
host->mmc->caps2 |= MMC_CAP2_CQE;
- ret = cqhci_init(cq_host, host->mmc, 1);
-
- return ret;
+ return cqhci_init(cq_host, host->mmc, 1);
}
static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
@@ -582,32 +729,15 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
int i;
int ret;
- ret = device_property_read_u32(dev, td[MMC_TIMING_LEGACY].otap_binding,
- &sdhci_am654->otap_del_sel[MMC_TIMING_LEGACY]);
- if (ret) {
- /*
- * ti,otap-del-sel-legacy is mandatory, look for old binding
- * if not found.
- */
- ret = device_property_read_u32(dev, "ti,otap-del-sel",
- &sdhci_am654->otap_del_sel[0]);
- if (ret) {
- dev_err(dev, "Couldn't find otap-del-sel\n");
-
- return ret;
- }
-
- dev_info(dev, "Using legacy binding ti,otap-del-sel\n");
- sdhci_am654->legacy_otapdly = true;
-
- return 0;
- }
-
- for (i = MMC_TIMING_MMC_HS; i <= MMC_TIMING_MMC_HS400; i++) {
+ for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) {
ret = device_property_read_u32(dev, td[i].otap_binding,
&sdhci_am654->otap_del_sel[i]);
if (ret) {
+ if (i == MMC_TIMING_LEGACY) {
+ dev_err(dev, "Couldn't find mandatory ti,otap-del-sel-legacy\n");
+ return ret;
+ }
dev_dbg(dev, "Couldn't find %s\n",
td[i].otap_binding);
/*
@@ -620,9 +750,12 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
host->mmc->caps2 &= ~td[i].capability;
}
- if (td[i].itap_binding)
- device_property_read_u32(dev, td[i].itap_binding,
- &sdhci_am654->itap_del_sel[i]);
+ if (td[i].itap_binding) {
+ ret = device_property_read_u32(dev, td[i].itap_binding,
+ &sdhci_am654->itap_del_sel[i]);
+ if (!ret)
+ sdhci_am654->itap_del_ena[i] = 0x1;
+ }
}
return 0;
@@ -632,6 +765,7 @@ static int sdhci_am654_init(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ struct device *dev = mmc_dev(host->mmc);
u32 ctl_cfg_2 = 0;
u32 mask;
u32 val;
@@ -672,6 +806,9 @@ static int sdhci_am654_init(struct sdhci_host *host)
regmap_update_bits(sdhci_am654->base, CTL_CFG_3, TUNINGFORSDR50_MASK,
TUNINGFORSDR50_MASK);
+	/* Reset the counter used to limit tuning retries */
+ sdhci_am654->tuning_loop = 0;
+
ret = sdhci_setup_host(host);
if (ret)
return ret;
@@ -684,6 +821,12 @@ static int sdhci_am654_init(struct sdhci_host *host)
if (ret)
goto err_cleanup_host;
+ if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_DISABLE_HS400 &&
+ host->mmc->caps2 & (MMC_CAP2_HS400 | MMC_CAP2_HS400_ES)) {
+ dev_info(dev, "HS400 mode not supported on this silicon revision, disabling it\n");
+ host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
+ }
+
ret = __sdhci_add_host(host);
if (ret)
goto err_cleanup_host;
@@ -739,11 +882,20 @@ static int sdhci_am654_get_of_property(struct platform_device *pdev,
device_property_read_u32(dev, "ti,clkbuf-sel",
&sdhci_am654->clkbuf_sel);
+ if (device_property_read_bool(dev, "ti,fails-without-test-cd"))
+ sdhci_am654->quirks |= SDHCI_AM654_QUIRK_FORCE_CDTEST;
+
sdhci_get_of_property(pdev);
return 0;
}
+static const struct soc_device_attribute sdhci_am654_descope_hs400[] = {
+ { .family = "AM62PX", .revision = "SR1.0" },
+ { .family = "AM62PX", .revision = "SR1.1" },
+ { /* sentinel */ }
+};
+
static const struct of_device_id sdhci_am654_of_match[] = {
{
.compatible = "ti,am654-sdhci-5.1",
@@ -759,11 +911,15 @@ static const struct of_device_id sdhci_am654_of_match[] = {
},
{
.compatible = "ti,am64-sdhci-8bit",
- .data = &sdhci_am64_8bit_drvdata,
+ .data = &sdhci_j721e_8bit_drvdata,
},
{
.compatible = "ti,am64-sdhci-4bit",
- .data = &sdhci_am64_4bit_drvdata,
+ .data = &sdhci_j721e_4bit_drvdata,
+ },
+ {
+ .compatible = "ti,am62-sdhci",
+ .data = &sdhci_am62_4bit_drvdata,
},
{ /* sentinel */ }
};
@@ -797,83 +953,186 @@ static int sdhci_am654_probe(struct platform_device *pdev)
pltfm_host = sdhci_priv(host);
sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
sdhci_am654->flags = drvdata->flags;
+ sdhci_am654->quirks = drvdata->quirks;
clk_xin = devm_clk_get(dev, "clk_xin");
if (IS_ERR(clk_xin)) {
dev_err(dev, "clk_xin clock not found.\n");
- ret = PTR_ERR(clk_xin);
- goto err_pltfm_free;
+ return PTR_ERR(clk_xin);
}
pltfm_host->clk = clk_xin;
- /* Clocks are enabled using pm_runtime */
- pm_runtime_enable(dev);
- ret = pm_runtime_resume_and_get(dev);
- if (ret)
- goto pm_runtime_disable;
-
base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- goto pm_runtime_put;
+ return PTR_ERR(base);
}
sdhci_am654->base = devm_regmap_init_mmio(dev, base,
&sdhci_am654_regmap_config);
if (IS_ERR(sdhci_am654->base)) {
dev_err(dev, "Failed to initialize regmap\n");
- ret = PTR_ERR(sdhci_am654->base);
- goto pm_runtime_put;
+ return PTR_ERR(sdhci_am654->base);
}
ret = sdhci_am654_get_of_property(pdev, sdhci_am654);
if (ret)
- goto pm_runtime_put;
+ return ret;
ret = mmc_of_parse(host->mmc);
- if (ret) {
- dev_err(dev, "parsing dt failed (%d)\n", ret);
- goto pm_runtime_put;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "parsing dt failed\n");
+ soc = soc_device_match(sdhci_am654_descope_hs400);
+ if (soc)
+ sdhci_am654->quirks |= SDHCI_AM654_QUIRK_DISABLE_HS400;
+
+ host->mmc_host_ops.start_signal_voltage_switch = sdhci_am654_start_signal_voltage_switch;
host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
+ pm_runtime_get_noresume(dev);
+ ret = pm_runtime_set_active(dev);
+ if (ret)
+ goto pm_put;
+ pm_runtime_enable(dev);
+ ret = clk_prepare_enable(pltfm_host->clk);
+ if (ret)
+ goto pm_disable;
+
ret = sdhci_am654_init(host);
if (ret)
- goto pm_runtime_put;
+ goto clk_disable;
+ /* Setting up autosuspend */
+ pm_runtime_set_autosuspend_delay(dev, SDHCI_AM654_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put_autosuspend(dev);
return 0;
-pm_runtime_put:
- pm_runtime_put_sync(dev);
-pm_runtime_disable:
+clk_disable:
+ clk_disable_unprepare(pltfm_host->clk);
+pm_disable:
pm_runtime_disable(dev);
-err_pltfm_free:
- sdhci_pltfm_free(pdev);
+pm_put:
+ pm_runtime_put_noidle(dev);
return ret;
}
-static int sdhci_am654_remove(struct platform_device *pdev)
+static void sdhci_am654_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct device *dev = &pdev->dev;
int ret;
- sdhci_remove_host(host, true);
- ret = pm_runtime_put_sync(&pdev->dev);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0)
+		dev_err(dev, "pm_runtime_get_sync() failed\n");
+
+ sdhci_remove_host(host, true);
+ clk_disable_unprepare(pltfm_host->clk);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+}
+
+static int sdhci_am654_restore(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ u32 ctl_cfg_2 = 0;
+ u32 val;
+ int ret;
+
+ if (sdhci_am654->flags & DLL_CALIB) {
+ regmap_read(sdhci_am654->base, PHY_STAT1, &val);
+ if (~val & CALDONE_MASK) {
+ /* Calibrate IO lines */
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL1,
+ PDB_MASK, PDB_MASK);
+ ret = regmap_read_poll_timeout(sdhci_am654->base,
+ PHY_STAT1, val,
+ val & CALDONE_MASK,
+ 1, 20);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Enable pins by setting IO mux to 0 */
+ if (sdhci_am654->flags & IOMUX_PRESENT)
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL1,
+ IOMUX_ENABLE_MASK, 0);
+
+ /* Set slot type based on SD or eMMC */
+ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ ctl_cfg_2 = SLOTTYPE_EMBEDDED;
+
+ regmap_update_bits(sdhci_am654->base, CTL_CFG_2, SLOTTYPE_MASK,
+ ctl_cfg_2);
+
+ regmap_read(sdhci_am654->base, CTL_CFG_3, &val);
+ if (~val & TUNINGFORSDR50_MASK)
+ /* Enable tuning for SDR50 */
+ regmap_update_bits(sdhci_am654->base, CTL_CFG_3, TUNINGFORSDR50_MASK,
+ TUNINGFORSDR50_MASK);
+
+ return 0;
+}
+
+static int sdhci_am654_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int ret;
+
+ if (host->tuning_mode != SDHCI_TUNING_MODE_3)
+ mmc_retune_needed(host->mmc);
+
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+
+ sdhci_runtime_suspend_host(host);
+
+ /* disable the clock */
+ clk_disable_unprepare(pltfm_host->clk);
+ return 0;
+}
+
+static int sdhci_am654_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int ret;
+
+ /* Enable the clock */
+ ret = clk_prepare_enable(pltfm_host->clk);
+ if (ret)
return ret;
- pm_runtime_disable(&pdev->dev);
- sdhci_pltfm_free(pdev);
+ ret = sdhci_am654_restore(host);
+ if (ret)
+ return ret;
+
+ sdhci_runtime_resume_host(host, 0);
+
+ ret = cqhci_resume(host->mmc);
+ if (ret)
+ return ret;
return 0;
}
+static const struct dev_pm_ops sdhci_am654_dev_pm_ops = {
+ RUNTIME_PM_OPS(sdhci_am654_runtime_suspend, sdhci_am654_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
static struct platform_driver sdhci_am654_driver = {
.driver = {
.name = "sdhci-am654",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .pm = pm_ptr(&sdhci_am654_dev_pm_ops),
.of_match_table = sdhci_am654_of_match,
},
.probe = sdhci_am654_probe,
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index 3f5977979cf2..e9b934e159ad 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -5,6 +5,7 @@
* Copyright (C) 2013 - 2015 Fujitsu Semiconductor, Ltd
* Vincent Yang <vincent.yang@tw.fujitsu.com>
* Copyright (C) 2015 Linaro Ltd Andy Green <andy.green@linaro.org>
+ * Copyright (C) 2019 Socionext Inc.
*/
#include <linux/acpi.h>
@@ -14,6 +15,7 @@
#include <linux/of.h>
#include <linux/property.h>
#include <linux/clk.h>
+#include <linux/reset.h>
#include "sdhci-pltfm.h"
#include "sdhci_f_sdh30.h"
@@ -21,14 +23,22 @@
struct f_sdhost_priv {
struct clk *clk_iface;
struct clk *clk;
+ struct reset_control *rst;
u32 vendor_hs200;
struct device *dev;
bool enable_cmd_dat_delay;
};
+static void *sdhci_f_sdhost_priv(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return sdhci_pltfm_priv(pltfm_host);
+}
+
static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
{
- struct f_sdhost_priv *priv = sdhci_priv(host);
+ struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
u32 ctrl = 0;
usleep_range(2500, 3000);
@@ -61,7 +71,7 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
{
- struct f_sdhost_priv *priv = sdhci_priv(host);
+ struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
u32 ctl;
if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0)
@@ -74,6 +84,13 @@ static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
ctl |= F_SDH30_CMD_DAT_DELAY;
sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL);
}
+
+ if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) &&
+ !(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
+ ctl = sdhci_readl(host, F_SDH30_TEST);
+ ctl |= F_SDH30_FORCE_CARD_INSERT;
+ sdhci_writel(host, ctl, F_SDH30_TEST);
+ }
}
static const struct sdhci_ops sdhci_f_sdh30_ops = {
@@ -85,61 +102,49 @@ static const struct sdhci_ops sdhci_f_sdh30_ops = {
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
+static const struct sdhci_pltfm_data sdhci_f_sdh30_pltfm_data = {
+ .ops = &sdhci_f_sdh30_ops,
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
+ | SDHCI_QUIRK_INVERTED_WRITE_PROTECT,
+ .quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE
+ | SDHCI_QUIRK2_TUNING_WORK_AROUND,
+};
+
static int sdhci_f_sdh30_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct device *dev = &pdev->dev;
- int irq, ctrl = 0, ret = 0;
+ int ctrl = 0, ret = 0;
struct f_sdhost_priv *priv;
+ struct sdhci_pltfm_host *pltfm_host;
u32 reg = 0;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
+ host = sdhci_pltfm_init(pdev, &sdhci_f_sdh30_pltfm_data,
+ sizeof(struct f_sdhost_priv));
if (IS_ERR(host))
return PTR_ERR(host);
- priv = sdhci_priv(host);
+ pltfm_host = sdhci_priv(host);
+ priv = sdhci_pltfm_priv(pltfm_host);
priv->dev = dev;
- host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
- SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
- host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
- SDHCI_QUIRK2_TUNING_WORK_AROUND;
-
priv->enable_cmd_dat_delay = device_property_read_bool(dev,
"fujitsu,cmd-dat-delay-select");
ret = mmc_of_parse(host->mmc);
if (ret)
- goto err;
-
- platform_set_drvdata(pdev, host);
-
- host->hw_name = "f_sdh30";
- host->ops = &sdhci_f_sdh30_ops;
- host->irq = irq;
-
- host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(host->ioaddr)) {
- ret = PTR_ERR(host->ioaddr);
- goto err;
- }
+ return ret;
if (dev_of_node(dev)) {
sdhci_get_of_property(pdev);
priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(priv->clk_iface)) {
- ret = PTR_ERR(priv->clk_iface);
- goto err;
- }
+ if (IS_ERR(priv->clk_iface))
+ return PTR_ERR(priv->clk_iface);
ret = clk_prepare_enable(priv->clk_iface);
if (ret)
- goto err;
+ return ret;
priv->clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(priv->clk)) {
@@ -150,6 +155,16 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
ret = clk_prepare_enable(priv->clk);
if (ret)
goto err_clk;
+
+ priv->rst = devm_reset_control_get_optional_shared(dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ ret = PTR_ERR(priv->rst);
+ goto err_rst;
+ }
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto err_rst;
}
/* init vendor specific regs */
@@ -168,6 +183,9 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
if (reg & SDHCI_CAN_DO_8BIT)
priv->vendor_hs200 = F_SDH30_EMMC_HS200;
+ if (!(reg & SDHCI_TIMEOUT_CLK_MASK))
+ host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
+
ret = sdhci_add_host(host);
if (ret)
goto err_add_host;
@@ -175,34 +193,33 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
return 0;
err_add_host:
+ reset_control_assert(priv->rst);
+err_rst:
clk_disable_unprepare(priv->clk);
err_clk:
clk_disable_unprepare(priv->clk_iface);
-err:
- sdhci_free_host(host);
return ret;
}
-static int sdhci_f_sdh30_remove(struct platform_device *pdev)
+static void sdhci_f_sdh30_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
- struct f_sdhost_priv *priv = sdhci_priv(host);
-
- sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) ==
- 0xffffffff);
-
- clk_disable_unprepare(priv->clk_iface);
- clk_disable_unprepare(priv->clk);
+ struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
+ struct clk *clk_iface = priv->clk_iface;
+ struct reset_control *rst = priv->rst;
+ struct clk *clk = priv->clk;
- sdhci_free_host(host);
- platform_set_drvdata(pdev, NULL);
+ sdhci_pltfm_remove(pdev);
- return 0;
+ reset_control_assert(rst);
+ clk_disable_unprepare(clk);
+ clk_disable_unprepare(clk_iface);
}
#ifdef CONFIG_OF
static const struct of_device_id f_sdh30_dt_ids[] = {
{ .compatible = "fujitsu,mb86s70-sdhci-3.0" },
+ { .compatible = "socionext,f-sdh30-e51-mmc" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, f_sdh30_dt_ids);
@@ -224,13 +241,13 @@ static struct platform_driver sdhci_f_sdh30_driver = {
.acpi_match_table = ACPI_PTR(f_sdh30_acpi_ids),
.pm = &sdhci_pltfm_pmops,
},
- .probe = sdhci_f_sdh30_probe,
- .remove = sdhci_f_sdh30_remove,
+ .probe = sdhci_f_sdh30_probe,
+ .remove = sdhci_f_sdh30_remove,
};
module_platform_driver(sdhci_f_sdh30_driver);
MODULE_DESCRIPTION("F_SDH30 SD Card Controller driver");
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("FUJITSU SEMICONDUCTOR LTD.");
+MODULE_AUTHOR("FUJITSU SEMICONDUCTOR LTD., Socionext Inc.");
MODULE_ALIAS("platform:f_sdh30");
diff --git a/drivers/mmc/host/sdhci_f_sdh30.h b/drivers/mmc/host/sdhci_f_sdh30.h
index fc1ad28f7ca9..7c3c66291d42 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.h
+++ b/drivers/mmc/host/sdhci_f_sdh30.h
@@ -29,4 +29,7 @@
#define F_SDH30_CMD_DAT_DELAY BIT(9)
#define F_SDH30_EMMC_HS200 BIT(24)
+#define F_SDH30_TEST 0x158
+#define F_SDH30_FORCE_CARD_INSERT BIT(6)
+
#define F_SDH30_MIN_CLOCK 400000
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index 76a8cd3a186f..481cb552c2b4 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -101,14 +101,6 @@ static inline void sdricoh_writel(struct sdricoh_host *host, unsigned int reg,
}
-static inline unsigned int sdricoh_readw(struct sdricoh_host *host,
- unsigned int reg)
-{
- unsigned int value = readw(host->iobase + reg);
- dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value);
- return value;
-}
-
static inline void sdricoh_writew(struct sdricoh_host *host, unsigned int reg,
unsigned short value)
{
@@ -411,9 +403,9 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
}
/* allocate privdata */
mmc = pcmcia_dev->priv =
- mmc_alloc_host(sizeof(struct sdricoh_host), &pcmcia_dev->dev);
+ devm_mmc_alloc_host(&pcmcia_dev->dev, sizeof(*host));
if (!mmc) {
- dev_err(dev, "mmc_alloc_host failed\n");
+ dev_err(dev, "devm_mmc_alloc_host failed\n");
result = -ENOMEM;
goto unmap_io;
}
@@ -439,7 +431,7 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
if (sdricoh_reset(host)) {
dev_dbg(dev, "could not reset\n");
result = -EIO;
- goto free_host;
+ goto unmap_io;
}
result = mmc_add_host(mmc);
@@ -448,8 +440,6 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
dev_dbg(dev, "mmc host registered\n");
return 0;
}
-free_host:
- mmc_free_host(mmc);
unmap_io:
pci_iounmap(pci_dev, iobase);
return result;
@@ -491,10 +481,8 @@ static void sdricoh_pcmcia_detach(struct pcmcia_device *link)
mmc_remove_host(mmc);
pci_iounmap(host->pci_dev, host->iobase);
pci_dev_put(host->pci_dev);
- mmc_free_host(mmc);
}
pcmcia_disable_device(link);
-
}
#ifdef CONFIG_PM
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index e5e457037235..bf899c8e38f5 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -43,12 +43,11 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
-#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
#include <linux/pagemap.h>
+#include <linux/platform_data/sh_mmcif.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
@@ -228,14 +227,12 @@ struct sh_mmcif_host {
bool dying;
long timeout;
void __iomem *addr;
- u32 *pio_ptr;
spinlock_t lock; /* protect sh_mmcif_host::state */
enum sh_mmcif_state state;
enum sh_mmcif_wait_for wait_for;
struct delayed_work timeout_work;
size_t blocksize;
- int sg_idx;
- int sg_blkidx;
+ struct sg_mapping_iter sg_miter;
bool power;
bool ccs_enable; /* Command Completion Signal support */
bool clk_ctrl2_enable;
@@ -405,6 +402,9 @@ static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
struct dma_slave_config cfg = { 0, };
res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
cfg.direction = direction;
if (direction == DMA_DEV_TO_MEM) {
@@ -439,14 +439,15 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
if (IS_ERR(host->chan_rx))
host->chan_rx = NULL;
}
- dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
- host->chan_rx);
if (!host->chan_tx || !host->chan_rx ||
sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) ||
sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM))
goto error;
+ dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
+ host->chan_rx);
+
return;
error:
@@ -518,8 +519,7 @@ static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
}
dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n",
- (best_freq / (1 << (clkdiv + 1))), clk,
- best_freq, clkdiv);
+ (best_freq >> (clkdiv + 1)), clk, best_freq, clkdiv);
clk_set_rate(host->clk, best_freq);
clkdiv = clkdiv << 16;
@@ -599,32 +599,17 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
return ret;
}
-static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
-{
- struct mmc_data *data = host->mrq->data;
-
- host->sg_blkidx += host->blocksize;
-
- /* data->sg->length must be a multiple of host->blocksize? */
- BUG_ON(host->sg_blkidx > data->sg->length);
-
- if (host->sg_blkidx == data->sg->length) {
- host->sg_blkidx = 0;
- if (++host->sg_idx < data->sg_len)
- host->pio_ptr = sg_virt(++data->sg);
- } else {
- host->pio_ptr = p;
- }
-
- return host->sg_idx != data->sg_len;
-}
-
static void sh_mmcif_single_read(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
+ struct mmc_data *data = mrq->data;
+
host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
BLOCK_SIZE_MASK) + 3;
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len,
+ SG_MITER_TO_SG);
+
host->wait_for = MMCIF_WAIT_FOR_READ;
/* buf read enable */
@@ -633,20 +618,32 @@ static void sh_mmcif_single_read(struct sh_mmcif_host *host,
static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
- u32 *p = sg_virt(data->sg);
+ u32 *p;
int i;
if (host->sd_error) {
+ sg_miter_stop(sgm);
data->error = sh_mmcif_error_manage(host);
dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false;
}
+ if (!sg_miter_next(sgm)) {
+ /* This should not happen on single blocks */
+ sg_miter_stop(sgm);
+ return false;
+ }
+
+ p = sgm->addr;
+
for (i = 0; i < host->blocksize / 4; i++)
*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+ sg_miter_stop(&host->sg_miter);
+
/* buffer read end */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
host->wait_for = MMCIF_WAIT_FOR_READ_END;
@@ -657,6 +654,7 @@ static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct mmc_data *data = mrq->data;
if (!data->sg_len || !data->sg->length)
@@ -665,46 +663,63 @@ static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
BLOCK_SIZE_MASK;
+ sg_miter_start(sgm, data->sg, data->sg_len,
+ SG_MITER_TO_SG);
+
+ /* Advance to the first sglist entry */
+ if (!sg_miter_next(sgm)) {
+ sg_miter_stop(sgm);
+ return;
+ }
+
host->wait_for = MMCIF_WAIT_FOR_MREAD;
- host->sg_idx = 0;
- host->sg_blkidx = 0;
- host->pio_ptr = sg_virt(data->sg);
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}
static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
- u32 *p = host->pio_ptr;
+ u32 *p;
int i;
if (host->sd_error) {
+ sg_miter_stop(sgm);
data->error = sh_mmcif_error_manage(host);
dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false;
}
- BUG_ON(!data->sg->length);
+ p = sgm->addr;
for (i = 0; i < host->blocksize / 4; i++)
*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
- if (!sh_mmcif_next_block(host, p))
- return false;
+ sgm->consumed = host->blocksize;
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+ if (!sg_miter_next(sgm)) {
+ sg_miter_stop(sgm);
+ return false;
+ }
+
return true;
}
static void sh_mmcif_single_write(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
+ struct mmc_data *data = mrq->data;
+
host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
BLOCK_SIZE_MASK) + 3;
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len,
+ SG_MITER_FROM_SG);
+
host->wait_for = MMCIF_WAIT_FOR_WRITE;
/* buf write enable */
@@ -713,20 +728,32 @@ static void sh_mmcif_single_write(struct sh_mmcif_host *host,
static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
- u32 *p = sg_virt(data->sg);
+ u32 *p;
int i;
if (host->sd_error) {
+ sg_miter_stop(sgm);
data->error = sh_mmcif_error_manage(host);
dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false;
}
+ if (!sg_miter_next(sgm)) {
+ /* This should not happen on single blocks */
+ sg_miter_stop(sgm);
+ return false;
+ }
+
+ p = sgm->addr;
+
for (i = 0; i < host->blocksize / 4; i++)
sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
+ sg_miter_stop(&host->sg_miter);
+
/* buffer write end */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
host->wait_for = MMCIF_WAIT_FOR_WRITE_END;
@@ -737,6 +764,7 @@ static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct mmc_data *data = mrq->data;
if (!data->sg_len || !data->sg->length)
@@ -745,34 +773,46 @@ static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
BLOCK_SIZE_MASK;
+ sg_miter_start(sgm, data->sg, data->sg_len,
+ SG_MITER_FROM_SG);
+
+ /* Advance to the first sglist entry */
+ if (!sg_miter_next(sgm)) {
+ sg_miter_stop(sgm);
+ return;
+ }
+
host->wait_for = MMCIF_WAIT_FOR_MWRITE;
- host->sg_idx = 0;
- host->sg_blkidx = 0;
- host->pio_ptr = sg_virt(data->sg);
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}
static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
- u32 *p = host->pio_ptr;
+ u32 *p;
int i;
if (host->sd_error) {
+ sg_miter_stop(sgm);
data->error = sh_mmcif_error_manage(host);
dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false;
}
- BUG_ON(!data->sg->length);
+ p = sgm->addr;
for (i = 0; i < host->blocksize / 4; i++)
sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
- if (!sh_mmcif_next_block(host, p))
+ sgm->consumed = host->blocksize;
+
+ if (!sg_miter_next(sgm)) {
+ sg_miter_stop(sgm);
return false;
+ }
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
@@ -1009,8 +1049,8 @@ static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
*/
host->clkdiv_map = 0x3ff;
- host->mmc->f_max = f_max / (1 << ffs(host->clkdiv_map));
- host->mmc->f_min = f_min / (1 << fls(host->clkdiv_map));
+ host->mmc->f_max = f_max >> ffs(host->clkdiv_map);
+ host->mmc->f_min = f_min >> fls(host->clkdiv_map);
} else {
unsigned int clk = clk_get_rate(host->clk);
@@ -1164,9 +1204,9 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
data->bytes_xfered = 0;
/* Abort DMA */
if (data->flags & MMC_DATA_READ)
- dmaengine_terminate_all(host->chan_rx);
+ dmaengine_terminate_sync(host->chan_rx);
else
- dmaengine_terminate_all(host->chan_tx);
+ dmaengine_terminate_sync(host->chan_tx);
}
return false;
@@ -1398,19 +1438,19 @@ static int sh_mmcif_probe(struct platform_device *pdev)
irq[0] = platform_get_irq(pdev, 0);
irq[1] = platform_get_irq_optional(pdev, 1);
if (irq[0] < 0)
- return -ENXIO;
+ return irq[0];
reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
- mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), dev);
+ mmc = devm_mmc_alloc_host(dev, sizeof(*host));
if (!mmc)
return -ENOMEM;
ret = mmc_of_parse(mmc);
if (ret < 0)
- goto err_host;
+ return ret;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -1441,15 +1481,13 @@ static int sh_mmcif_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
host->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(host->clk)) {
- ret = PTR_ERR(host->clk);
- dev_err(dev, "cannot get clock: %d\n", ret);
- goto err_host;
- }
+ if (IS_ERR(host->clk))
+ return dev_err_probe(dev, PTR_ERR(host->clk),
+ "cannot get clock\n");
ret = clk_prepare_enable(host->clk);
if (ret < 0)
- goto err_host;
+ return ret;
sh_mmcif_clk_setup(host);
@@ -1502,12 +1540,10 @@ err_clk:
clk_disable_unprepare(host->clk);
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
-err_host:
- mmc_free_host(mmc);
return ret;
}
-static int sh_mmcif_remove(struct platform_device *pdev)
+static void sh_mmcif_remove(struct platform_device *pdev)
{
struct sh_mmcif_host *host = platform_get_drvdata(pdev);
@@ -1528,14 +1564,10 @@ static int sh_mmcif_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&host->timeout_work);
clk_disable_unprepare(host->clk);
- mmc_free_host(host->mmc);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int sh_mmcif_suspend(struct device *dev)
{
struct sh_mmcif_host *host = dev_get_drvdata(dev);
@@ -1547,15 +1579,7 @@ static int sh_mmcif_suspend(struct device *dev)
return 0;
}
-static int sh_mmcif_resume(struct device *dev)
-{
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(sh_mmcif_dev_pm_ops, sh_mmcif_suspend, NULL);
static struct platform_driver sh_mmcif_driver = {
.probe = sh_mmcif_probe,
@@ -1563,7 +1587,7 @@ static struct platform_driver sh_mmcif_driver = {
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sh_mmcif_dev_pm_ops,
+ .pm = pm_sleep_ptr(&sh_mmcif_dev_pm_ops),
.of_match_table = sh_mmcif_of_match,
},
};
diff --git a/drivers/mmc/host/sunplus-mmc.c b/drivers/mmc/host/sunplus-mmc.c
new file mode 100644
index 000000000000..63279760239c
--- /dev/null
+++ b/drivers/mmc/host/sunplus-mmc.c
@@ -0,0 +1,997 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Sunplus Inc.
+ * Author: Tony Huang <tonyhuang.sunplus@gmail.com>
+ * Author: Li-hao Kuo <lhjeff911@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#define SPMMC_MIN_CLK 400000
+#define SPMMC_MAX_CLK 52000000
+#define SPMMC_MAX_BLK_COUNT 65536
+#define SPMMC_MAX_TUNABLE_DLY 7
+#define SPMMC_TIMEOUT_US 500000
+#define SPMMC_POLL_DELAY_US 10
+
+#define SPMMC_CARD_MEDIATYPE_SRCDST_REG 0x0000
+#define SPMMC_MEDIA_TYPE GENMASK(2, 0)
+#define SPMMC_DMA_SOURCE GENMASK(6, 4)
+#define SPMMC_DMA_DESTINATION GENMASK(10, 8)
+#define SPMMC_MEDIA_NONE 0
+#define SPMMC_MEDIA_SD 6
+#define SPMMC_MEDIA_MS 7
+
+#define SPMMC_SDRAM_SECTOR_0_SIZE_REG 0x0008
+#define SPMMC_DMA_BASE_ADDR_REG 0x000C
+#define SPMMC_HW_DMA_CTRL_REG 0x0010
+#define SPMMC_HW_DMA_RST BIT(9)
+#define SPMMC_DMAIDLE BIT(10)
+
+#define SPMMC_MAX_DMA_MEMORY_SECTORS 8
+
+#define SPMMC_SDRAM_SECTOR_1_ADDR_REG 0x0018
+#define SPMMC_SDRAM_SECTOR_1_LENG_REG 0x001C
+#define SPMMC_SDRAM_SECTOR_2_ADDR_REG 0x0020
+#define SPMMC_SDRAM_SECTOR_2_LENG_REG 0x0024
+#define SPMMC_SDRAM_SECTOR_3_ADDR_REG 0x0028
+#define SPMMC_SDRAM_SECTOR_3_LENG_REG 0x002C
+#define SPMMC_SDRAM_SECTOR_4_ADDR_REG 0x0030
+#define SPMMC_SDRAM_SECTOR_4_LENG_REG 0x0034
+#define SPMMC_SDRAM_SECTOR_5_ADDR_REG 0x0038
+#define SPMMC_SDRAM_SECTOR_5_LENG_REG 0x003C
+#define SPMMC_SDRAM_SECTOR_6_ADDR_REG 0x0040
+#define SPMMC_SDRAM_SECTOR_6_LENG_REG 0x0044
+#define SPMMC_SDRAM_SECTOR_7_ADDR_REG 0x0048
+#define SPMMC_SDRAM_SECTOR_7_LENG_REG 0x004C
+
+#define SPMMC_SD_INT_REG 0x0088
+#define SPMMC_SDINT_SDCMPEN BIT(0)
+#define SPMMC_SDINT_SDCMP BIT(1)
+#define SPMMC_SDINT_SDCMPCLR BIT(2)
+#define SPMMC_SDINT_SDIOEN BIT(3)
+#define SPMMC_SDINT_SDIO BIT(4)
+#define SPMMC_SDINT_SDIOCLR BIT(5)
+
+#define SPMMC_SD_PAGE_NUM_REG 0x008C
+
+#define SPMMC_SD_CONFIG0_REG 0x0090
+#define SPMMC_SD_PIO_MODE BIT(0)
+#define SPMMC_SD_DDR_MODE BIT(1)
+#define SPMMC_SD_LEN_MODE BIT(2)
+#define SPMMC_SD_TRANS_MODE GENMASK(5, 4)
+#define SPMMC_SD_AUTO_RESPONSE BIT(6)
+#define SPMMC_SD_CMD_DUMMY BIT(7)
+#define SPMMC_SD_RSP_CHK_EN BIT(8)
+#define SPMMC_SDIO_MODE BIT(9)
+#define SPMMC_SD_MMC_MODE BIT(10)
+#define SPMMC_SD_DATA_WD BIT(11)
+#define SPMMC_RX4_EN BIT(14)
+#define SPMMC_SD_RSP_TYPE BIT(15)
+#define SPMMC_MMC8_EN BIT(18)
+#define SPMMC_CLOCK_DIVISION GENMASK(31, 20)
+
+#define SPMMC_SDIO_CTRL_REG 0x0094
+#define SPMMC_INT_MULTI_TRIG BIT(6)
+
+#define SPMMC_SD_RST_REG 0x0098
+#define SPMMC_SD_CTRL_REG 0x009C
+#define SPMMC_NEW_COMMAND_TRIGGER BIT(0)
+#define SPMMC_DUMMY_CLOCK_TRIGGER BIT(1)
+
+#define SPMMC_SD_STATUS_REG 0x00A0
+#define SPMMC_SDSTATUS_DUMMY_READY BIT(0)
+#define SPMMC_SDSTATUS_RSP_BUF_FULL BIT(1)
+#define SPMMC_SDSTATUS_TX_DATA_BUF_EMPTY BIT(2)
+#define SPMMC_SDSTATUS_RX_DATA_BUF_FULL BIT(3)
+#define SPMMC_SDSTATUS_CMD_PIN_STATUS BIT(4)
+#define SPMMC_SDSTATUS_DAT0_PIN_STATUS BIT(5)
+#define SPMMC_SDSTATUS_RSP_TIMEOUT BIT(6)
+#define SPMMC_SDSTATUS_CARD_CRC_CHECK_TIMEOUT BIT(7)
+#define SPMMC_SDSTATUS_STB_TIMEOUT BIT(8)
+#define SPMMC_SDSTATUS_RSP_CRC7_ERROR BIT(9)
+#define SPMMC_SDSTATUS_CRC_TOKEN_CHECK_ERROR BIT(10)
+#define SPMMC_SDSTATUS_RDATA_CRC16_ERROR BIT(11)
+#define SPMMC_SDSTATUS_SUSPEND_STATE_READY BIT(12)
+#define SPMMC_SDSTATUS_BUSY_CYCLE BIT(13)
+#define SPMMC_SDSTATUS_DAT1_PIN_STATUS BIT(14)
+#define SPMMC_SDSTATUS_SD_SENSE_STATUS BIT(15)
+#define SPMMC_SDSTATUS_BOOT_ACK_TIMEOUT BIT(16)
+#define SPMMC_SDSTATUS_BOOT_DATA_TIMEOUT BIT(17)
+#define SPMMC_SDSTATUS_BOOT_ACK_ERROR BIT(18)
+
+#define SPMMC_SD_STATE_REG 0x00A4
+#define SPMMC_CRCTOKEN_CHECK_RESULT GENMASK(6, 4)
+#define SPMMC_SDSTATE_ERROR BIT(13)
+#define SPMMC_SDSTATE_FINISH BIT(14)
+
+#define SPMMC_SD_HW_STATE_REG 0x00A8
+#define SPMMC_SD_BLOCKSIZE_REG 0x00AC
+
+#define SPMMC_SD_CONFIG1_REG 0x00B0
+#define SPMMC_TX_DUMMY_NUM GENMASK(8, 0)
+#define SPMMC_SD_HIGH_SPEED_EN BIT(31)
+
+#define SPMMC_SD_TIMING_CONFIG0_REG 0x00B4
+#define SPMMC_SD_CLOCK_DELAY GENMASK(2, 0)
+#define SPMMC_SD_WRITE_DATA_DELAY GENMASK(6, 4)
+#define SPMMC_SD_WRITE_COMMAND_DELAY GENMASK(10, 8)
+#define SPMMC_SD_READ_RESPONSE_DELAY GENMASK(14, 12)
+#define SPMMC_SD_READ_DATA_DELAY GENMASK(18, 16)
+#define SPMMC_SD_READ_CRC_DELAY GENMASK(22, 20)
+
+#define SPMMC_SD_PIODATATX_REG 0x00BC
+#define SPMMC_SD_PIODATARX_REG 0x00C0
+#define SPMMC_SD_CMDBUF0_3_REG 0x00C4
+#define SPMMC_SD_CMDBUF4_REG 0x00C8
+#define SPMMC_SD_RSPBUF0_3_REG 0x00CC
+#define SPMMC_SD_RSPBUF4_5_REG 0x00D0
+
+#define SPMMC_MAX_RETRIES (8 * 8)
+
+struct spmmc_tuning_info {
+ int enable_tuning;
+ int need_tuning;
+ int retried; /* number of times the request has been retried */
+ u32 rd_crc_dly:3;
+ u32 rd_dat_dly:3;
+ u32 rd_rsp_dly:3;
+ u32 wr_cmd_dly:3;
+ u32 wr_dat_dly:3;
+ u32 clk_dly:3;
+};
+
+#define SPMMC_DMA_MODE 0
+#define SPMMC_PIO_MODE 1
+
+struct spmmc_host {
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *rstc;
+ struct mmc_host *mmc;
+ struct mmc_request *mrq; /* current mrq */
+ int irq;
+ int dmapio_mode;
+ struct spmmc_tuning_info tuning_info;
+ int dma_int_threshold;
+ int dma_use_int;
+};
+
+static inline int spmmc_wait_finish(struct spmmc_host *host)
+{
+ u32 state;
+
+ return readl_poll_timeout(host->base + SPMMC_SD_STATE_REG, state,
+ (state & SPMMC_SDSTATE_FINISH),
+ SPMMC_POLL_DELAY_US, SPMMC_TIMEOUT_US);
+}
+
+static inline int spmmc_wait_sdstatus(struct spmmc_host *host, unsigned int status_bit)
+{
+ u32 status;
+
+ return readl_poll_timeout(host->base + SPMMC_SD_STATUS_REG, status,
+ (status & status_bit),
+ SPMMC_POLL_DELAY_US, SPMMC_TIMEOUT_US);
+}
+
+#define spmmc_wait_rspbuf_full(host) spmmc_wait_sdstatus(host, SPMMC_SDSTATUS_RSP_BUF_FULL)
+#define spmmc_wait_rxbuf_full(host) spmmc_wait_sdstatus(host, SPMMC_SDSTATUS_RX_DATA_BUF_FULL)
+#define spmmc_wait_txbuf_empty(host) spmmc_wait_sdstatus(host, SPMMC_SDSTATUS_TX_DATA_BUF_EMPTY)
+
+static void spmmc_get_rsp(struct spmmc_host *host, struct mmc_command *cmd)
+{
+ u32 value0_3, value4_5;
+
+ if (!(cmd->flags & MMC_RSP_PRESENT))
+ return;
+ if (cmd->flags & MMC_RSP_136) {
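+ /* The long (R2) response is exposed 6 bytes at a time, so read the buffers three times to fill resp[0..3] */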
+ if (spmmc_wait_rspbuf_full(host))
+ return;
+ value0_3 = readl(host->base + SPMMC_SD_RSPBUF0_3_REG);
+ value4_5 = readl(host->base + SPMMC_SD_RSPBUF4_5_REG) & 0xffff;
+ cmd->resp[0] = (value0_3 << 8) | (value4_5 >> 8);
+ cmd->resp[1] = value4_5 << 24;
+ value0_3 = readl(host->base + SPMMC_SD_RSPBUF0_3_REG);
+ value4_5 = readl(host->base + SPMMC_SD_RSPBUF4_5_REG) & 0xffff;
+ cmd->resp[1] |= value0_3 >> 8;
+ cmd->resp[2] = value0_3 << 24;
+ cmd->resp[2] |= value4_5 << 8;
+ value0_3 = readl(host->base + SPMMC_SD_RSPBUF0_3_REG);
+ value4_5 = readl(host->base + SPMMC_SD_RSPBUF4_5_REG) & 0xffff;
+ cmd->resp[2] |= value0_3 >> 24;
+ cmd->resp[3] = value0_3 << 8;
+ cmd->resp[3] |= value4_5 >> 8;
+ } else {
+ if (spmmc_wait_rspbuf_full(host))
+ return;
+ value0_3 = readl(host->base + SPMMC_SD_RSPBUF0_3_REG);
+ value4_5 = readl(host->base + SPMMC_SD_RSPBUF4_5_REG) & 0xffff;
+ cmd->resp[0] = (value0_3 << 8) | (value4_5 >> 8);
+ cmd->resp[1] = value4_5 << 24;
+ }
+}
+
+static void spmmc_set_bus_clk(struct spmmc_host *host, int clk)
+{
+ unsigned int clkdiv;
+ int f_min = host->mmc->f_min;
+ int f_max = host->mmc->f_max;
+ u32 value = readl(host->base + SPMMC_SD_CONFIG0_REG);
+
+ if (clk < f_min)
+ clk = f_min;
+ if (clk > f_max)
+ clk = f_max;
+
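+ /* Derive the clock divider so the resulting bus clock does not exceed the requested rate */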
+ clkdiv = (clk_get_rate(host->clk) + clk) / clk - 1;
+ if (clkdiv > 0xfff)
+ clkdiv = 0xfff;
+ value &= ~SPMMC_CLOCK_DIVISION;
+ value |= FIELD_PREP(SPMMC_CLOCK_DIVISION, clkdiv);
+ writel(value, host->base + SPMMC_SD_CONFIG0_REG);
+}
+
+static void spmmc_set_bus_timing(struct spmmc_host *host, unsigned int timing)
+{
+ u32 value = readl(host->base + SPMMC_SD_CONFIG1_REG);
+ int clkdiv = FIELD_GET(SPMMC_CLOCK_DIVISION, readl(host->base + SPMMC_SD_CONFIG0_REG));
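+ /* Write-side delays scale with half the clock divider, capped at the 3-bit field maximum of 7 */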
+ int delay = clkdiv / 2 < 7 ? clkdiv / 2 : 7;
+ int hs_en = 1, ddr_enabled = 0;
+
+ switch (timing) {
+ case MMC_TIMING_LEGACY:
+ hs_en = 0;
+ break;
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ hs_en = 1;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ ddr_enabled = 1;
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ ddr_enabled = 1;
+ break;
+ default:
+ hs_en = 0;
+ break;
+ }
+
+ if (hs_en) {
+ value |= SPMMC_SD_HIGH_SPEED_EN;
+ writel(value, host->base + SPMMC_SD_CONFIG1_REG);
+ value = readl(host->base + SPMMC_SD_TIMING_CONFIG0_REG);
+ value &= ~SPMMC_SD_WRITE_DATA_DELAY;
+ value |= FIELD_PREP(SPMMC_SD_WRITE_DATA_DELAY, delay);
+ value &= ~SPMMC_SD_WRITE_COMMAND_DELAY;
+ value |= FIELD_PREP(SPMMC_SD_WRITE_COMMAND_DELAY, delay);
+ writel(value, host->base + SPMMC_SD_TIMING_CONFIG0_REG);
+ } else {
+ value &= ~SPMMC_SD_HIGH_SPEED_EN;
+ writel(value, host->base + SPMMC_SD_CONFIG1_REG);
+ }
+ if (ddr_enabled) {
+ value = readl(host->base + SPMMC_SD_CONFIG0_REG);
+ value |= SPMMC_SD_DDR_MODE;
+ writel(value, host->base + SPMMC_SD_CONFIG0_REG);
+ } else {
+ value = readl(host->base + SPMMC_SD_CONFIG0_REG);
+ value &= ~SPMMC_SD_DDR_MODE;
+ writel(value, host->base + SPMMC_SD_CONFIG0_REG);
+ }
+}
+
+static void spmmc_set_bus_width(struct spmmc_host *host, int width)
+{
+ u32 value = readl(host->base + SPMMC_SD_CONFIG0_REG);
+
+ switch (width) {
+ case MMC_BUS_WIDTH_8:
+ value &= ~SPMMC_SD_DATA_WD;
+ value |= SPMMC_MMC8_EN;
+ break;
+ case MMC_BUS_WIDTH_4:
+ value |= SPMMC_SD_DATA_WD;
+ value &= ~SPMMC_MMC8_EN;
+ break;
+ default:
+ value &= ~SPMMC_SD_DATA_WD;
+ value &= ~SPMMC_MMC8_EN;
+ break;
+ }
+ writel(value, host->base + SPMMC_SD_CONFIG0_REG);
+}
+
+/*
+ * select the working mode of controller: sd/sdio/emmc
+ */
+static void spmmc_set_sdmmc_mode(struct spmmc_host *host)
+{
+ u32 value = readl(host->base + SPMMC_SD_CONFIG0_REG);
+
+ value |= SPMMC_SD_MMC_MODE;
+ value &= ~SPMMC_SDIO_MODE;
+ writel(value, host->base + SPMMC_SD_CONFIG0_REG);
+}
+
+static void spmmc_sw_reset(struct spmmc_host *host)
+{
+ u32 value;
+
+ /*
+ * Must reset dma operation first, or it will
+ * be stuck on sd_state == 0x1c00 because of
+ * a controller software reset bug
+ */
+ value = readl(host->base + SPMMC_HW_DMA_CTRL_REG);
+ value |= SPMMC_DMAIDLE;
+ writel(value, host->base + SPMMC_HW_DMA_CTRL_REG);
+ value &= ~SPMMC_DMAIDLE;
+ writel(value, host->base + SPMMC_HW_DMA_CTRL_REG);
+ value = readl(host->base + SPMMC_HW_DMA_CTRL_REG);
+ value |= SPMMC_HW_DMA_RST;
+ writel(value, host->base + SPMMC_HW_DMA_CTRL_REG);
+ writel(0x7, host->base + SPMMC_SD_RST_REG);
+ readl_poll_timeout_atomic(host->base + SPMMC_SD_HW_STATE_REG, value,
+ !(value & BIT(6)), 1, SPMMC_TIMEOUT_US);
+}
+
+static void spmmc_prepare_cmd(struct spmmc_host *host, struct mmc_command *cmd)
+{
+ u32 value;
+
+ /* Add the start bit according to the command format in the spec */
+ value = ((cmd->opcode | 0x40) << 24) | (cmd->arg >> 8);
+ writel(value, host->base + SPMMC_SD_CMDBUF0_3_REG);
+ writeb(cmd->arg & 0xff, host->base + SPMMC_SD_CMDBUF4_REG);
+
+ /* disable interrupt if needed */
+ value = readl(host->base + SPMMC_SD_INT_REG);
+ value |= SPMMC_SDINT_SDCMPCLR;
+ value &= ~SPMMC_SDINT_SDCMPEN;
+ writel(value, host->base + SPMMC_SD_INT_REG);
+
+ value = readl(host->base + SPMMC_SD_CONFIG0_REG);
+ value &= ~SPMMC_SD_TRANS_MODE;
+ value |= SPMMC_SD_CMD_DUMMY;
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ value |= SPMMC_SD_AUTO_RESPONSE;
+ } else {
+ value &= ~SPMMC_SD_AUTO_RESPONSE;
+ writel(value, host->base + SPMMC_SD_CONFIG0_REG);
+
+ return;
+ }
+ /*
+ * Currently, the host is not capable of checking the CRC7 of R2
+ * responses, so enable the CRC7 check only for 48-bit response commands.
+ */
+ if (cmd->flags & MMC_RSP_CRC && !(cmd->flags & MMC_RSP_136))
+ value |= SPMMC_SD_RSP_CHK_EN;
+ else
+ value &= ~SPMMC_SD_RSP_CHK_EN;
+
+ if (cmd->flags & MMC_RSP_136)
+ value |= SPMMC_SD_RSP_TYPE;
+ else
+ value &= ~SPMMC_SD_RSP_TYPE;
+ writel(value, host->base + SPMMC_SD_CONFIG0_REG);
+}
+
+static void spmmc_prepare_data(struct spmmc_host *host, struct mmc_data *data)
+{
+ u32 value, srcdst;
+
+ writel(data->blocks - 1, host->base + SPMMC_SD_PAGE_NUM_REG);
+ writel(data->blksz - 1, host->base + SPMMC_SD_BLOCKSIZE_REG);
+ value = readl(host->base + SPMMC_SD_CONFIG0_REG);
+ if (data->flags & MMC_DATA_READ) {
+ value &= ~SPMMC_SD_TRANS_MODE;
+ value |= FIELD_PREP(SPMMC_SD_TRANS_MODE, 2);
+ value &= ~SPMMC_SD_AUTO_RESPONSE;
+ value &= ~SPMMC_SD_CMD_DUMMY;
+ srcdst = readl(host->base + SPMMC_CARD_MEDIATYPE_SRCDST_REG);
+ srcdst &= ~SPMMC_DMA_SOURCE;
+ srcdst |= FIELD_PREP(SPMMC_DMA_SOURCE, 0x2);
+ srcdst &= ~SPMMC_DMA_DESTINATION;
+ srcdst |= FIELD_PREP(SPMMC_DMA_DESTINATION, 0x1);
+ writel(srcdst, host->base + SPMMC_CARD_MEDIATYPE_SRCDST_REG);
+ } else {
+ value &= ~SPMMC_SD_TRANS_MODE;
+ value |= FIELD_PREP(SPMMC_SD_TRANS_MODE, 1);
+ srcdst = readl(host->base + SPMMC_CARD_MEDIATYPE_SRCDST_REG);
+ srcdst &= ~SPMMC_DMA_SOURCE;
+ srcdst |= FIELD_PREP(SPMMC_DMA_SOURCE, 0x1);
+ srcdst &= ~SPMMC_DMA_DESTINATION;
+ srcdst |= FIELD_PREP(SPMMC_DMA_DESTINATION, 0x2);
+ writel(srcdst, host->base + SPMMC_CARD_MEDIATYPE_SRCDST_REG);
+ }
+
+ value |= SPMMC_SD_LEN_MODE;
+ if (host->dmapio_mode == SPMMC_DMA_MODE) {
+ struct scatterlist *sg;
+ dma_addr_t dma_addr;
+ unsigned int dma_size;
+ int i, count = 1;
+
+ count = dma_map_sg(host->mmc->parent, data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ if (!count || count > SPMMC_MAX_DMA_MEMORY_SECTORS) {
+ data->error = -EINVAL;
+
+ return;
+ }
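+ /* Program each mapped segment into one of the 8 hardware DMA sector address/length register pairs */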
+ for_each_sg(data->sg, sg, count, i) {
+ dma_addr = sg_dma_address(sg);
+ dma_size = sg_dma_len(sg) / data->blksz - 1;
+ if (i == 0) {
+ writel(dma_addr, host->base + SPMMC_DMA_BASE_ADDR_REG);
+ writel(dma_size, host->base + SPMMC_SDRAM_SECTOR_0_SIZE_REG);
+ } else if (i == 1) {
+ writel(dma_addr, host->base + SPMMC_SDRAM_SECTOR_1_ADDR_REG);
+ writel(dma_size, host->base + SPMMC_SDRAM_SECTOR_1_LENG_REG);
+ } else if (i == 2) {
+ writel(dma_addr, host->base + SPMMC_SDRAM_SECTOR_2_ADDR_REG);
+ writel(dma_size, host->base + SPMMC_SDRAM_SECTOR_2_LENG_REG);
+ } else if (i == 3) {
+ writel(dma_addr, host->base + SPMMC_SDRAM_SECTOR_3_ADDR_REG);
+ writel(dma_size, host->base + SPMMC_SDRAM_SECTOR_3_LENG_REG);
+ } else if (i == 4) {
+ writel(dma_addr, host->base + SPMMC_SDRAM_SECTOR_4_ADDR_REG);
+ writel(dma_size, host->base + SPMMC_SDRAM_SECTOR_4_LENG_REG);
+ } else if (i == 5) {
+ writel(dma_addr, host->base + SPMMC_SDRAM_SECTOR_5_ADDR_REG);
+ writel(dma_size, host->base + SPMMC_SDRAM_SECTOR_5_LENG_REG);
+ } else if (i == 6) {
+ writel(dma_addr, host->base + SPMMC_SDRAM_SECTOR_6_ADDR_REG);
+ writel(dma_size, host->base + SPMMC_SDRAM_SECTOR_6_LENG_REG);
+ } else if (i == 7) {
+ writel(dma_addr, host->base + SPMMC_SDRAM_SECTOR_7_ADDR_REG);
+ writel(dma_size, host->base + SPMMC_SDRAM_SECTOR_7_LENG_REG);
+ }
+ }
+ value &= ~SPMMC_SD_PIO_MODE;
+ writel(value, host->base + SPMMC_SD_CONFIG0_REG);
+ /* enable interrupt if needed */
+ if (data->blksz * data->blocks > host->dma_int_threshold) {
+ host->dma_use_int = 1;
+ value = readl(host->base + SPMMC_SD_INT_REG);
+ value &= ~SPMMC_SDINT_SDCMPEN;
+ value |= FIELD_PREP(SPMMC_SDINT_SDCMPEN, 1); /* sdcmpen */
+ writel(value, host->base + SPMMC_SD_INT_REG);
+ }
+ } else {
+ value |= SPMMC_SD_PIO_MODE;
+ value |= SPMMC_RX4_EN;
+ writel(value, host->base + SPMMC_SD_CONFIG0_REG);
+ }
+}
+
+static inline void spmmc_trigger_transaction(struct spmmc_host *host)
+{
+ u32 value = readl(host->base + SPMMC_SD_CTRL_REG);
+
+ value |= SPMMC_NEW_COMMAND_TRIGGER;
+ writel(value, host->base + SPMMC_SD_CTRL_REG);
+}
+
+static void spmmc_send_stop_cmd(struct spmmc_host *host)
+{
+ struct mmc_command stop = {};
+ u32 value;
+
+ stop.opcode = MMC_STOP_TRANSMISSION;
+ stop.arg = 0;
+ stop.flags = MMC_RSP_R1B;
+ spmmc_prepare_cmd(host, &stop);
+ value = readl(host->base + SPMMC_SD_INT_REG);
+ value &= ~SPMMC_SDINT_SDCMPEN;
+ value |= FIELD_PREP(SPMMC_SDINT_SDCMPEN, 0);
+ writel(value, host->base + SPMMC_SD_INT_REG);
+ spmmc_trigger_transaction(host);
+ readl_poll_timeout(host->base + SPMMC_SD_STATE_REG, value,
+ (value & SPMMC_SDSTATE_FINISH), 1, SPMMC_TIMEOUT_US);
+}
+
+static int spmmc_check_error(struct spmmc_host *host, struct mmc_request *mrq)
+{
+ int ret = 0;
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = mrq->data;
+
+ u32 value = readl(host->base + SPMMC_SD_STATE_REG);
+ u32 crc_token = FIELD_GET(SPMMC_CRCTOKEN_CHECK_RESULT, value);
+
+ if (value & SPMMC_SDSTATE_ERROR) {
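+ /* On error, nudge the delay that matches the failure, reset the controller and write the adjusted delays back */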
+ u32 timing_cfg0 = 0;
+
+ value = readl(host->base + SPMMC_SD_STATUS_REG);
+
+ if (host->tuning_info.enable_tuning) {
+ timing_cfg0 = readl(host->base + SPMMC_SD_TIMING_CONFIG0_REG);
+ host->tuning_info.rd_crc_dly = FIELD_GET(SPMMC_SD_READ_CRC_DELAY,
+ timing_cfg0);
+ host->tuning_info.rd_dat_dly = FIELD_GET(SPMMC_SD_READ_DATA_DELAY,
+ timing_cfg0);
+ host->tuning_info.rd_rsp_dly = FIELD_GET(SPMMC_SD_READ_RESPONSE_DELAY,
+ timing_cfg0);
+ host->tuning_info.wr_cmd_dly = FIELD_GET(SPMMC_SD_WRITE_COMMAND_DELAY,
+ timing_cfg0);
+ host->tuning_info.wr_dat_dly = FIELD_GET(SPMMC_SD_WRITE_DATA_DELAY,
+ timing_cfg0);
+ }
+
+ if (value & SPMMC_SDSTATUS_RSP_TIMEOUT) {
+ ret = -ETIMEDOUT;
+ host->tuning_info.wr_cmd_dly++;
+ } else if (value & SPMMC_SDSTATUS_RSP_CRC7_ERROR) {
+ ret = -EILSEQ;
+ host->tuning_info.rd_rsp_dly++;
+ } else if (data) {
+ if ((value & SPMMC_SDSTATUS_STB_TIMEOUT)) {
+ ret = -ETIMEDOUT;
+ host->tuning_info.rd_dat_dly++;
+ } else if (value & SPMMC_SDSTATUS_RDATA_CRC16_ERROR) {
+ ret = -EILSEQ;
+ host->tuning_info.rd_dat_dly++;
+ } else if (value & SPMMC_SDSTATUS_CARD_CRC_CHECK_TIMEOUT) {
+ ret = -ETIMEDOUT;
+ host->tuning_info.rd_crc_dly++;
+ } else if (value & SPMMC_SDSTATUS_CRC_TOKEN_CHECK_ERROR) {
+ ret = -EILSEQ;
+ if (crc_token == 0x5)
+ host->tuning_info.wr_dat_dly++;
+ else
+ host->tuning_info.rd_crc_dly++;
+ }
+ }
+ cmd->error = ret;
+ if (data) {
+ data->error = ret;
+ data->bytes_xfered = 0;
+ }
+ if (!host->tuning_info.need_tuning && host->tuning_info.enable_tuning)
+ cmd->retries = SPMMC_MAX_RETRIES;
+ spmmc_sw_reset(host);
+
+ if (host->tuning_info.enable_tuning) {
+ timing_cfg0 &= ~SPMMC_SD_READ_CRC_DELAY;
+ timing_cfg0 |= FIELD_PREP(SPMMC_SD_READ_CRC_DELAY,
+ host->tuning_info.rd_crc_dly);
+ timing_cfg0 &= ~SPMMC_SD_READ_DATA_DELAY;
+ timing_cfg0 |= FIELD_PREP(SPMMC_SD_READ_DATA_DELAY,
+ host->tuning_info.rd_dat_dly);
+ timing_cfg0 &= ~SPMMC_SD_READ_RESPONSE_DELAY;
+ timing_cfg0 |= FIELD_PREP(SPMMC_SD_READ_RESPONSE_DELAY,
+ host->tuning_info.rd_rsp_dly);
+ timing_cfg0 &= ~SPMMC_SD_WRITE_COMMAND_DELAY;
+ timing_cfg0 |= FIELD_PREP(SPMMC_SD_WRITE_COMMAND_DELAY,
+ host->tuning_info.wr_cmd_dly);
+ timing_cfg0 &= ~SPMMC_SD_WRITE_DATA_DELAY;
+ timing_cfg0 |= FIELD_PREP(SPMMC_SD_WRITE_DATA_DELAY,
+ host->tuning_info.wr_dat_dly);
+ writel(timing_cfg0, host->base + SPMMC_SD_TIMING_CONFIG0_REG);
+ }
+ } else if (data) {
+ data->error = 0;
+ data->bytes_xfered = data->blocks * data->blksz;
+ }
+ host->tuning_info.need_tuning = ret;
+
+ return ret;
+}
+
+/*
+ * The strategy is:
+ * 1. if several consecutive delays are acceptable, choose the middle one;
+ * 2. otherwise, choose the first one.
+ */
+static inline int spmmc_find_best_delay(u8 candidate_dly)
+{
+ int f, w, value;
+
+ if (!candidate_dly)
+ return 0;
+ f = ffs(candidate_dly) - 1;
+ w = hweight8(candidate_dly);
+ value = ((1 << w) - 1) << f;
+ if (0xff == (value & ~candidate_dly))
+ return (f + w / 2);
+ else
+ return (f);
+}
+
+static void spmmc_xfer_data_pio(struct spmmc_host *host, struct mmc_data *data)
+{
+ u32 *buf;
+ int data_left = data->blocks * data->blksz;
+ int consumed, remain;
+
+ struct sg_mapping_iter sg_miter;
+ unsigned int flags = 0;
+
+ if (data->flags & MMC_DATA_WRITE)
+ flags |= SG_MITER_FROM_SG;
+ else
+ flags |= SG_MITER_TO_SG;
+ sg_miter_start(&sg_miter, data->sg, data->sg_len, flags);
+ while (data_left > 0) {
+ consumed = 0;
+ if (!sg_miter_next(&sg_miter))
+ break;
+ buf = sg_miter.addr;
+ remain = sg_miter.length;
+ do {
+ if (data->flags & MMC_DATA_WRITE) {
+ if (spmmc_wait_txbuf_empty(host))
+ goto done;
+ writel(*buf, host->base + SPMMC_SD_PIODATATX_REG);
+ } else {
+ if (spmmc_wait_rxbuf_full(host))
+ goto done;
+ *buf = readl(host->base + SPMMC_SD_PIODATARX_REG);
+ }
+ buf++;
+ /* tx/rx 4 bytes at a time in PIO mode */
+ consumed += 4;
+ remain -= 4;
+ } while (remain);
+ sg_miter.consumed = consumed;
+ data_left -= consumed;
+ }
+done:
+ sg_miter_stop(&sg_miter);
+}
+
+static void spmmc_controller_init(struct spmmc_host *host)
+{
+ u32 value;
+ int ret = reset_control_assert(host->rstc);
+
+ if (!ret) {
+ usleep_range(1000, 1250);
+ ret = reset_control_deassert(host->rstc);
+ }
+
+ value = readl(host->base + SPMMC_CARD_MEDIATYPE_SRCDST_REG);
+ value &= ~SPMMC_MEDIA_TYPE;
+ value |= FIELD_PREP(SPMMC_MEDIA_TYPE, SPMMC_MEDIA_SD);
+ writel(value, host->base + SPMMC_CARD_MEDIATYPE_SRCDST_REG);
+}
+
+/*
+ * 1. unmap scatterlist if needed;
+ * 2. get response & check error conditions;
+ * 3. notify the mmc layer that the request is done
+ */
+static void spmmc_finish_request(struct spmmc_host *host, struct mmc_request *mrq)
+{
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+
+ if (!mrq)
+ return;
+
+ cmd = mrq->cmd;
+ data = mrq->data;
+
+ if (data && SPMMC_DMA_MODE == host->dmapio_mode) {
+ dma_unmap_sg(host->mmc->parent, data->sg, data->sg_len, mmc_get_dma_dir(data));
+ host->dma_use_int = 0;
+ }
+
+ spmmc_get_rsp(host, cmd);
+ spmmc_check_error(host, mrq);
+ if (mrq->stop)
+ spmmc_send_stop_cmd(host);
+
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+}
+
+/* Interrupt Service Routine */
+static irqreturn_t spmmc_irq(int irq, void *dev_id)
+{
+ struct spmmc_host *host = dev_id;
+ u32 value = readl(host->base + SPMMC_SD_INT_REG);
+
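+ /* Transfer-complete interrupt: ack and mask it, then finish the request in the threaded handler */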
+ if ((value & SPMMC_SDINT_SDCMP) && (value & SPMMC_SDINT_SDCMPEN)) {
+ value &= ~SPMMC_SDINT_SDCMPEN;
+ value |= SPMMC_SDINT_SDCMPCLR;
+ writel(value, host->base + SPMMC_SD_INT_REG);
+ return IRQ_WAKE_THREAD;
+ }
+ return IRQ_HANDLED;
+}
+
+static void spmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct spmmc_host *host = mmc_priv(mmc);
+ struct mmc_data *data;
+ struct mmc_command *cmd;
+
+ host->mrq = mrq;
+ data = mrq->data;
+ cmd = mrq->cmd;
+
+ spmmc_prepare_cmd(host, cmd);
+ /* We need to manually read the R2 response. */
+ if (cmd->flags & MMC_RSP_136) {
+ spmmc_trigger_transaction(host);
+ spmmc_get_rsp(host, cmd);
+ spmmc_wait_finish(host);
+ spmmc_check_error(host, mrq);
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+ } else {
+ if (data)
+ spmmc_prepare_data(host, data);
+
+ if (host->dmapio_mode == SPMMC_PIO_MODE && data) {
+ u32 value;
+ /* PIO data transfers do not use the interrupt */
+ value = readl(host->base + SPMMC_SD_INT_REG);
+ value &= ~SPMMC_SDINT_SDCMPEN;
+ writel(value, host->base + SPMMC_SD_INT_REG);
+ spmmc_trigger_transaction(host);
+ spmmc_xfer_data_pio(host, data);
+ spmmc_wait_finish(host);
+ spmmc_finish_request(host, mrq);
+ } else {
+ if (host->dma_use_int) {
+ spmmc_trigger_transaction(host);
+ } else {
+ spmmc_trigger_transaction(host);
+ spmmc_wait_finish(host);
+ spmmc_finish_request(host, mrq);
+ }
+ }
+ }
+}
+
+static void spmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct spmmc_host *host = (struct spmmc_host *)mmc_priv(mmc);
+
+ spmmc_set_bus_clk(host, ios->clock);
+ spmmc_set_bus_timing(host, ios->timing);
+ spmmc_set_bus_width(host, ios->bus_width);
+ /* Ensure the mode is correct, because the controller may have been hw reset */
+ spmmc_set_sdmmc_mode(host);
+}
+
+/*
+ * Return values for the get_cd callback should be:
+ * 0 for an absent card
+ * 1 for a present card
+ * -ENOSYS when not supported (equal to NULL callback)
+ * or a negative errno value when something bad happened
+ */
+static int spmmc_get_cd(struct mmc_host *mmc)
+{
+ int ret = 0;
+
+ if (mmc_host_can_gpio_cd(mmc))
+ ret = mmc_gpio_get_cd(mmc);
+
+ if (ret < 0)
+ ret = 0;
+
+ return ret;
+}
+
+static int spmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct spmmc_host *host = mmc_priv(mmc);
+ u8 smpl_dly = 0, candidate_dly = 0;
+ u32 value;
+
+ host->tuning_info.enable_tuning = 0;
+ do {
+ value = readl(host->base + SPMMC_SD_TIMING_CONFIG0_REG);
+ value &= ~SPMMC_SD_READ_RESPONSE_DELAY;
+ value |= FIELD_PREP(SPMMC_SD_READ_RESPONSE_DELAY, smpl_dly);
+ value &= ~SPMMC_SD_READ_DATA_DELAY;
+ value |= FIELD_PREP(SPMMC_SD_READ_DATA_DELAY, smpl_dly);
+ value &= ~SPMMC_SD_READ_CRC_DELAY;
+ value |= FIELD_PREP(SPMMC_SD_READ_CRC_DELAY, smpl_dly);
+ writel(value, host->base + SPMMC_SD_TIMING_CONFIG0_REG);
+
+ if (!mmc_send_tuning(mmc, opcode, NULL)) {
+ candidate_dly |= (1 << smpl_dly);
+ break;
+ }
+ } while (smpl_dly++ <= SPMMC_MAX_TUNABLE_DLY);
+ host->tuning_info.enable_tuning = 1;
+
+ if (candidate_dly) {
+ smpl_dly = spmmc_find_best_delay(candidate_dly);
+ value = readl(host->base + SPMMC_SD_TIMING_CONFIG0_REG);
+ value &= ~SPMMC_SD_READ_RESPONSE_DELAY;
+ value |= FIELD_PREP(SPMMC_SD_READ_RESPONSE_DELAY, smpl_dly);
+ value &= ~SPMMC_SD_READ_DATA_DELAY;
+ value |= FIELD_PREP(SPMMC_SD_READ_DATA_DELAY, smpl_dly);
+ value &= ~SPMMC_SD_READ_CRC_DELAY;
+ value |= FIELD_PREP(SPMMC_SD_READ_CRC_DELAY, smpl_dly);
+ writel(value, host->base + SPMMC_SD_TIMING_CONFIG0_REG);
+ return 0;
+ }
+
+ return -EIO;
+}
+
+static const struct mmc_host_ops spmmc_ops = {
+ .request = spmmc_request,
+ .set_ios = spmmc_set_ios,
+ .get_cd = spmmc_get_cd,
+ .execute_tuning = spmmc_execute_tuning,
+};
+
+static irqreturn_t spmmc_func_finish_req(int irq, void *dev_id)
+{
+ struct spmmc_host *host = dev_id;
+
+ spmmc_finish_request(host, host->mrq);
+
+ return IRQ_HANDLED;
+}
+
+static int spmmc_drv_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct resource *res;
+ struct spmmc_host *host;
+ int ret = 0;
+
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(struct spmmc_host));
+ if (!mmc)
+ return -ENOMEM;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->dmapio_mode = SPMMC_DMA_MODE;
+ host->dma_int_threshold = 1024;
+
+ host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(host->base))
+ return PTR_ERR(host->base);
+
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(host->clk), "clk get fail\n");
+
+ host->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(host->rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(host->rstc), "rst get fail\n");
+
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0)
+ return host->irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, host->irq,
+ spmmc_irq, spmmc_func_finish_req, IRQF_SHARED,
+ NULL, host);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to enable clk\n");
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto clk_disable;
+
+ mmc->ops = &spmmc_ops;
+ mmc->f_min = SPMMC_MIN_CLK;
+ if (mmc->f_max > SPMMC_MAX_CLK)
+ mmc->f_max = SPMMC_MAX_CLK;
+
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret)
+ goto clk_disable;
+
+ if (!mmc->ocr_avail)
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mmc->max_seg_size = SPMMC_MAX_BLK_COUNT * 512;
+ mmc->max_segs = SPMMC_MAX_DMA_MEMORY_SECTORS;
+ mmc->max_req_size = SPMMC_MAX_BLK_COUNT * 512;
+ mmc->max_blk_size = 512;
+ mmc->max_blk_count = SPMMC_MAX_BLK_COUNT;
+
+ dev_set_drvdata(&pdev->dev, host);
+ spmmc_controller_init(host);
+ spmmc_set_sdmmc_mode(host);
+ host->tuning_info.enable_tuning = 1;
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto pm_disable;
+
+ return 0;
+
+pm_disable:
+ pm_runtime_disable(&pdev->dev);
+
+clk_disable:
+ clk_disable_unprepare(host->clk);
+ return ret;
+}
+
+static void spmmc_drv_remove(struct platform_device *dev)
+{
+ struct spmmc_host *host = platform_get_drvdata(dev);
+
+ mmc_remove_host(host->mmc);
+ pm_runtime_get_sync(&dev->dev);
+ clk_disable_unprepare(host->clk);
+ pm_runtime_put_noidle(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+}
+
+static int spmmc_pm_runtime_suspend(struct device *dev)
+{
+ struct spmmc_host *host;
+
+ host = dev_get_drvdata(dev);
+ clk_disable_unprepare(host->clk);
+
+ return 0;
+}
+
+static int spmmc_pm_runtime_resume(struct device *dev)
+{
+ struct spmmc_host *host;
+
+ host = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(host->clk);
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(spmmc_pm_ops, spmmc_pm_runtime_suspend,
+ spmmc_pm_runtime_resume, NULL);
+
+static const struct of_device_id spmmc_of_table[] = {
+ {
+ .compatible = "sunplus,sp7021-mmc",
+ },
+ {/* sentinel */}
+};
+MODULE_DEVICE_TABLE(of, spmmc_of_table);
+
+static struct platform_driver spmmc_driver = {
+ .probe = spmmc_drv_probe,
+ .remove = spmmc_drv_remove,
+ .driver = {
+ .name = "spmmc",
+ .pm = pm_ptr(&spmmc_pm_ops),
+ .of_match_table = spmmc_of_table,
+ },
+};
+module_platform_driver(spmmc_driver);
+
+MODULE_AUTHOR("Tony Huang <tonyhuang.sunplus@gmail.com>");
+MODULE_AUTHOR("Li-hao Kuo <lhjeff911@gmail.com>");
+MODULE_DESCRIPTION("Sunplus MMC controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 2702736a1c57..8dbcff53a631 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -377,8 +377,9 @@ static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host,
pdes[i].buf_addr_ptr1 =
cpu_to_le32(sg_dma_address(&data->sg[i]) >>
host->cfg->idma_des_shift);
- pdes[i].buf_addr_ptr2 = cpu_to_le32((u32)next_desc >>
- host->cfg->idma_des_shift);
+ pdes[i].buf_addr_ptr2 =
+ cpu_to_le32(next_desc >>
+ host->cfg->idma_des_shift);
}
pdes[0].config |= cpu_to_le32(SDXC_IDMAC_DES0_FD);
@@ -1115,7 +1116,7 @@ static const struct mmc_host_ops sunxi_mmc_ops = {
.get_cd = mmc_gpio_get_cd,
.enable_sdio_irq = sunxi_mmc_enable_sdio_irq,
.start_signal_voltage_switch = sunxi_mmc_volt_switch,
- .hw_reset = sunxi_mmc_hw_reset,
+ .card_hw_reset = sunxi_mmc_hw_reset,
.card_busy = sunxi_mmc_card_busy,
};
@@ -1167,6 +1168,14 @@ static const struct sunxi_mmc_cfg sun9i_a80_cfg = {
.can_calibrate = false,
};
+static const struct sunxi_mmc_cfg sun20i_d1_cfg = {
+ .idma_des_size_bits = 13,
+ .idma_des_shift = 2,
+ .can_calibrate = true,
+ .mask_data0 = true,
+ .needs_new_timings = true,
+};
+
static const struct sunxi_mmc_cfg sun50i_a64_cfg = {
.idma_des_size_bits = 16,
.clk_delays = NULL,
@@ -1182,10 +1191,9 @@ static const struct sunxi_mmc_cfg sun50i_a64_emmc_cfg = {
.needs_new_timings = true,
};
-static const struct sunxi_mmc_cfg sun50i_a100_cfg = {
+static const struct sunxi_mmc_cfg sun50i_h616_cfg = {
.idma_des_size_bits = 16,
.idma_des_shift = 2,
- .clk_delays = NULL,
.can_calibrate = true,
.mask_data0 = true,
.needs_new_timings = true,
@@ -1205,10 +1213,12 @@ static const struct of_device_id sunxi_mmc_of_match[] = {
{ .compatible = "allwinner,sun7i-a20-mmc", .data = &sun7i_a20_cfg },
{ .compatible = "allwinner,sun8i-a83t-emmc", .data = &sun8i_a83t_emmc_cfg },
{ .compatible = "allwinner,sun9i-a80-mmc", .data = &sun9i_a80_cfg },
+ { .compatible = "allwinner,sun20i-d1-mmc", .data = &sun20i_d1_cfg },
{ .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun50i-a64-emmc", .data = &sun50i_a64_emmc_cfg },
- { .compatible = "allwinner,sun50i-a100-mmc", .data = &sun50i_a100_cfg },
+ { .compatible = "allwinner,sun50i-a100-mmc", .data = &sun20i_d1_cfg },
{ .compatible = "allwinner,sun50i-a100-emmc", .data = &sun50i_a100_emmc_cfg },
+ { .compatible = "allwinner,sun50i-h616-mmc", .data = &sun50i_h616_cfg },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
@@ -1340,8 +1350,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
return ret;
host->irq = platform_get_irq(pdev, 0);
- if (host->irq <= 0) {
- ret = -EINVAL;
+ if (host->irq < 0) {
+ ret = host->irq;
goto error_disable_mmc;
}
@@ -1359,11 +1369,10 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
struct mmc_host *mmc;
int ret;
- mmc = mmc_alloc_host(sizeof(struct sunxi_mmc_host), &pdev->dev);
- if (!mmc) {
- dev_err(&pdev->dev, "mmc alloc host failed\n");
- return -ENOMEM;
- }
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host));
+ if (!mmc)
+ return dev_err_probe(&pdev->dev, -ENOMEM,
+ "mmc alloc host failed\n");
platform_set_drvdata(pdev, mmc);
host = mmc_priv(mmc);
@@ -1373,15 +1382,13 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
ret = sunxi_mmc_resource_request(host, pdev);
if (ret)
- goto error_free_host;
+ return ret;
host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
&host->sg_dma, GFP_KERNEL);
- if (!host->sg_cpu) {
- dev_err(&pdev->dev, "Failed to allocate DMA descriptor mem\n");
- ret = -ENOMEM;
- goto error_free_host;
- }
+ if (!host->sg_cpu)
+ return dev_err_probe(&pdev->dev, -ENOMEM,
+ "Failed to allocate DMA descriptor mem\n");
if (host->cfg->ccu_has_timings_switch) {
/*
@@ -1471,27 +1478,23 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
error_free_dma:
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
-error_free_host:
- mmc_free_host(mmc);
return ret;
}
-static int sunxi_mmc_remove(struct platform_device *pdev)
+static void sunxi_mmc_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct sunxi_mmc_host *host = mmc_priv(mmc);
mmc_remove_host(mmc);
- pm_runtime_force_suspend(&pdev->dev);
- disable_irq(host->irq);
- sunxi_mmc_disable(host);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev)) {
+ disable_irq(host->irq);
+ sunxi_mmc_disable(host);
+ }
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
- mmc_free_host(mmc);
-
- return 0;
}
-#ifdef CONFIG_PM
static int sunxi_mmc_runtime_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
@@ -1526,14 +1529,10 @@ static int sunxi_mmc_runtime_suspend(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops sunxi_mmc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(sunxi_mmc_runtime_suspend,
- sunxi_mmc_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(sunxi_mmc_runtime_suspend, sunxi_mmc_runtime_resume, NULL)
};
static struct platform_driver sunxi_mmc_driver = {
@@ -1541,7 +1540,7 @@ static struct platform_driver sunxi_mmc_driver = {
.name = "sunxi-mmc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sunxi_mmc_of_match,
- .pm = &sunxi_mmc_pm_ops,
+ .pm = pm_ptr(&sunxi_mmc_pm_ops),
},
.probe = sunxi_mmc_probe,
.remove = sunxi_mmc_remove,
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 9fdf7ea06e3f..2cd69c9e9571 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -13,6 +13,7 @@
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/module.h>
+#include <linux/workqueue.h>
#include <asm/io.h>
#define DRIVER_NAME "tifm_sd"
@@ -97,7 +98,7 @@ struct tifm_sd {
unsigned int clk_div;
unsigned long timeout_jiffies;
- struct tasklet_struct finish_tasklet;
+ struct work_struct finish_bh_work;
struct timer_list timer;
struct mmc_request *req;
@@ -116,7 +117,7 @@ static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,
unsigned char *buf;
unsigned int pos = 0, val;
- buf = kmap_atomic(pg) + off;
+ buf = kmap_local_page(pg) + off;
if (host->cmd_flags & DATA_CARRY) {
buf[pos++] = host->bounce_buf_data[0];
host->cmd_flags &= ~DATA_CARRY;
@@ -132,7 +133,7 @@ static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,
}
buf[pos++] = (val >> 8) & 0xff;
}
- kunmap_atomic(buf - off);
+ kunmap_local(buf - off);
}
static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
@@ -142,7 +143,7 @@ static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
unsigned char *buf;
unsigned int pos = 0, val;
- buf = kmap_atomic(pg) + off;
+ buf = kmap_local_page(pg) + off;
if (host->cmd_flags & DATA_CARRY) {
val = host->bounce_buf_data[0] | ((buf[pos++] << 8) & 0xff00);
writel(val, sock->addr + SOCK_MMCSD_DATA);
@@ -159,7 +160,7 @@ static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
val |= (buf[pos++] << 8) & 0xff00;
writel(val, sock->addr + SOCK_MMCSD_DATA);
}
- kunmap_atomic(buf - off);
+ kunmap_local(buf - off);
}
static void tifm_sd_transfer_data(struct tifm_sd *host)
@@ -190,7 +191,7 @@ static void tifm_sd_transfer_data(struct tifm_sd *host)
}
off = sg[host->sg_pos].offset + host->block_pos;
- pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
+ pg = sg_page(&sg[host->sg_pos]) + (off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, cnt);
@@ -210,13 +211,13 @@ static void tifm_sd_copy_page(struct page *dst, unsigned int dst_off,
struct page *src, unsigned int src_off,
unsigned int count)
{
- unsigned char *src_buf = kmap_atomic(src) + src_off;
- unsigned char *dst_buf = kmap_atomic(dst) + dst_off;
+ unsigned char *src_buf = kmap_local_page(src) + src_off;
+ unsigned char *dst_buf = kmap_local_page(dst) + dst_off;
memcpy(dst_buf, src_buf, count);
- kunmap_atomic(dst_buf - dst_off);
- kunmap_atomic(src_buf - src_off);
+ kunmap_local(dst_buf - dst_off);
+ kunmap_local(src_buf - src_off);
}
static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
@@ -239,7 +240,7 @@ static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
}
off = sg[host->sg_pos].offset + host->block_pos;
- pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
+ pg = sg_page(&sg[host->sg_pos]) + (off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, cnt);
@@ -264,16 +265,13 @@ static int tifm_sd_set_dma_data(struct tifm_sd *host, struct mmc_data *r_data)
unsigned int t_size = TIFM_DMA_TSIZE * r_data->blksz;
unsigned int dma_len, dma_blk_cnt, dma_off;
struct scatterlist *sg = NULL;
- unsigned long flags;
if (host->sg_pos == host->sg_len)
return 1;
if (host->cmd_flags & DATA_CARRY) {
host->cmd_flags &= ~DATA_CARRY;
- local_irq_save(flags);
tifm_sd_bounce_block(host, r_data);
- local_irq_restore(flags);
if (host->sg_pos == host->sg_len)
return 1;
}
@@ -300,11 +298,9 @@ static int tifm_sd_set_dma_data(struct tifm_sd *host, struct mmc_data *r_data)
if (dma_blk_cnt)
sg = &r_data->sg[host->sg_pos];
else if (dma_len) {
- if (r_data->flags & MMC_DATA_WRITE) {
- local_irq_save(flags);
+ if (r_data->flags & MMC_DATA_WRITE)
tifm_sd_bounce_block(host, r_data);
- local_irq_restore(flags);
- } else
+ else
host->cmd_flags |= DATA_CARRY;
sg = &host->bounce_buf;
@@ -468,7 +464,7 @@ static void tifm_sd_check_status(struct tifm_sd *host)
}
}
finish_request:
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
}
/* Called from interrupt handler */
@@ -506,7 +502,6 @@ static void tifm_sd_card_event(struct tifm_dev *sock)
unsigned int host_status = 0;
int cmd_error = 0;
struct mmc_command *cmd = NULL;
- unsigned long flags;
spin_lock(&sock->lock);
host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock));
@@ -570,9 +565,7 @@ static void tifm_sd_card_event(struct tifm_dev *sock)
if (host_status & (TIFM_MMCSD_AE | TIFM_MMCSD_AF
| TIFM_MMCSD_BRS)) {
- local_irq_save(flags);
tifm_sd_transfer_data(host);
- local_irq_restore(flags);
host_status &= ~TIFM_MMCSD_AE;
}
}
@@ -669,8 +662,8 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
if(1 != tifm_map_sg(sock, &host->bounce_buf, 1,
r_data->flags & MMC_DATA_WRITE
- ? PCI_DMA_TODEVICE
- : PCI_DMA_FROMDEVICE)) {
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE)) {
pr_err("%s : scatterlist map failed\n",
dev_name(&sock->dev));
mrq->cmd->error = -ENOMEM;
@@ -680,15 +673,15 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
r_data->sg_len,
r_data->flags
& MMC_DATA_WRITE
- ? PCI_DMA_TODEVICE
- : PCI_DMA_FROMDEVICE);
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
if (host->sg_len < 1) {
pr_err("%s : scatterlist map failed\n",
dev_name(&sock->dev));
tifm_unmap_sg(sock, &host->bounce_buf, 1,
r_data->flags & MMC_DATA_WRITE
- ? PCI_DMA_TODEVICE
- : PCI_DMA_FROMDEVICE);
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
mrq->cmd->error = -ENOMEM;
goto err_out;
}
@@ -731,9 +724,9 @@ err_out:
mmc_request_done(mmc, mrq);
}
-static void tifm_sd_end_cmd(struct tasklet_struct *t)
+static void tifm_sd_end_cmd(struct work_struct *t)
{
- struct tifm_sd *host = from_tasklet(host, t, finish_tasklet);
+ struct tifm_sd *host = from_work(host, t, finish_bh_work);
struct tifm_dev *sock = host->dev;
struct mmc_host *mmc = tifm_get_drvdata(sock);
struct mmc_request *mrq;
@@ -742,7 +735,7 @@ static void tifm_sd_end_cmd(struct tasklet_struct *t)
spin_lock_irqsave(&sock->lock, flags);
- del_timer(&host->timer);
+ timer_delete(&host->timer);
mrq = host->req;
host->req = NULL;
@@ -762,10 +755,10 @@ static void tifm_sd_end_cmd(struct tasklet_struct *t)
} else {
tifm_unmap_sg(sock, &host->bounce_buf, 1,
(r_data->flags & MMC_DATA_WRITE)
- ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
tifm_unmap_sg(sock, r_data->sg, r_data->sg_len,
(r_data->flags & MMC_DATA_WRITE)
- ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
r_data->bytes_xfered = r_data->blocks
@@ -784,7 +777,7 @@ static void tifm_sd_end_cmd(struct tasklet_struct *t)
static void tifm_sd_abort(struct timer_list *t)
{
- struct tifm_sd *host = from_timer(host, t, timer);
+ struct tifm_sd *host = timer_container_of(host, t, timer);
pr_err("%s : card failed to respond for a long period of time "
"(%x, %x)\n",
@@ -954,7 +947,7 @@ static int tifm_sd_probe(struct tifm_dev *sock)
return rc;
}
- mmc = mmc_alloc_host(sizeof(struct tifm_sd), &sock->dev);
+ mmc = devm_mmc_alloc_host(&sock->dev, sizeof(*host));
if (!mmc)
return -ENOMEM;
@@ -968,7 +961,7 @@ static int tifm_sd_probe(struct tifm_dev *sock)
*/
mmc->max_busy_timeout = TIFM_MMCSD_REQ_TIMEOUT_MS;
- tasklet_setup(&host->finish_tasklet, tifm_sd_end_cmd);
+ INIT_WORK(&host->finish_bh_work, tifm_sd_end_cmd);
timer_setup(&host->timer, tifm_sd_abort, 0);
mmc->ops = &tifm_sd_ops;
@@ -989,10 +982,7 @@ static int tifm_sd_probe(struct tifm_dev *sock)
if (!rc)
rc = mmc_add_host(mmc);
- if (!rc)
- return 0;
- mmc_free_host(mmc);
return rc;
}
@@ -1007,7 +997,7 @@ static void tifm_sd_remove(struct tifm_dev *sock)
writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
spin_unlock_irqrestore(&sock->lock, flags);
- tasklet_kill(&host->finish_tasklet);
+ cancel_work_sync(&host->finish_bh_work);
spin_lock_irqsave(&sock->lock, flags);
if (host->req) {
@@ -1017,13 +1007,11 @@ static void tifm_sd_remove(struct tifm_dev *sock)
host->req->cmd->error = -ENOMEDIUM;
if (host->req->stop)
host->req->stop->error = -ENOMEDIUM;
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
}
spin_unlock_irqrestore(&sock->lock, flags);
mmc_remove_host(mmc);
dev_dbg(&sock->dev, "after remove\n");
-
- mmc_free_host(mmc);
}
#ifdef CONFIG_PM
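[Illustrative note, not part of the patch] tifm_sd.c above is converted from a tasklet to a BH workqueue; the same pattern recurs in the tmio and via-sdmmc hunks further down. A minimal sketch of the conversion, assuming a hypothetical foo_host structure:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct foo_host {
	struct work_struct finish_bh_work;
	/* ... */
};

static void foo_finish_bh_work(struct work_struct *t)
{
	struct foo_host *host = from_work(host, t, finish_bh_work);

	/* request-completion handling formerly done in the tasklet callback */
	(void)host;
}

static void foo_init(struct foo_host *host)
{
	INIT_WORK(&host->finish_bh_work, foo_finish_bh_work);	/* was tasklet_setup() */
}

static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo_host *host = data;

	/* system_bh_wq runs the work in softirq context, like a tasklet did */
	queue_work(system_bh_wq, &host->finish_bh_work);	/* was tasklet_schedule() */
	return IRQ_HANDLED;
}

static void foo_teardown(struct foo_host *host)
{
	cancel_work_sync(&host->finish_bh_work);	/* was tasklet_kill() */
}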
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
deleted file mode 100644
index b55a29c53d9c..000000000000
--- a/drivers/mmc/host/tmio_mmc.c
+++ /dev/null
@@ -1,227 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for the MMC / SD / SDIO cell found in:
- *
- * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
- *
- * Copyright (C) 2017 Renesas Electronics Corporation
- * Copyright (C) 2017 Horms Solutions, Simon Horman
- * Copyright (C) 2007 Ian Molton
- * Copyright (C) 2004 Ian Molton
- */
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/tmio.h>
-#include <linux/mmc/host.h>
-#include <linux/module.h>
-#include <linux/pagemap.h>
-#include <linux/scatterlist.h>
-
-#include "tmio_mmc.h"
-
-/* Registers specific to this variant */
-#define CTL_SDIO_REGS 0x100
-#define CTL_CLK_AND_WAIT_CTL 0x138
-#define CTL_RESET_SDIO 0x1e0
-
-static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
-{
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
-
- usleep_range(10000, 11000);
- sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
- usleep_range(10000, 11000);
-}
-
-static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
-{
- sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
- usleep_range(10000, 11000);
-
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
-
- usleep_range(10000, 11000);
-}
-
-static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
- unsigned int new_clock)
-{
- unsigned int divisor;
- u32 clk = 0;
- int clk_sel;
-
- if (new_clock == 0) {
- tmio_mmc_clk_stop(host);
- return;
- }
-
- divisor = host->pdata->hclk / new_clock;
-
- /* bit7 set: 1/512, ... bit0 set: 1/4, all bits clear: 1/2 */
- clk_sel = (divisor <= 1);
- clk = clk_sel ? 0 : (roundup_pow_of_two(divisor) >> 2);
-
- host->pdata->set_clk_div(host->pdev, clk_sel);
-
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
- usleep_range(10000, 11000);
-
- tmio_mmc_clk_start(host);
-}
-
-static void tmio_mmc_reset(struct tmio_mmc_host *host)
-{
- sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
- usleep_range(10000, 11000);
- sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
- usleep_range(10000, 11000);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int tmio_mmc_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- const struct mfd_cell *cell = mfd_get_cell(pdev);
- int ret;
-
- ret = pm_runtime_force_suspend(dev);
-
- /* Tell MFD core it can disable us now.*/
- if (!ret && cell->disable)
- cell->disable(pdev);
-
- return ret;
-}
-
-static int tmio_mmc_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- const struct mfd_cell *cell = mfd_get_cell(pdev);
- int ret = 0;
-
- /* Tell the MFD core we are ready to be enabled */
- if (cell->resume)
- ret = cell->resume(pdev);
-
- if (!ret)
- ret = pm_runtime_force_resume(dev);
-
- return ret;
-}
-#endif
-
-static int tmio_mmc_probe(struct platform_device *pdev)
-{
- const struct mfd_cell *cell = mfd_get_cell(pdev);
- struct tmio_mmc_data *pdata;
- struct tmio_mmc_host *host;
- struct resource *res;
- int ret = -EINVAL, irq;
-
- if (pdev->num_resources != 2)
- goto out;
-
- pdata = pdev->dev.platform_data;
- if (!pdata || !pdata->hclk)
- goto out;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto out;
- }
-
- /* Tell the MFD core we are ready to be enabled */
- if (cell->enable) {
- ret = cell->enable(pdev);
- if (ret)
- goto out;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -EINVAL;
- goto cell_disable;
- }
-
- host = tmio_mmc_host_alloc(pdev, pdata);
- if (IS_ERR(host)) {
- ret = PTR_ERR(host);
- goto cell_disable;
- }
-
- /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
- host->bus_shift = resource_size(res) >> 10;
- host->set_clock = tmio_mmc_set_clock;
- host->reset = tmio_mmc_reset;
-
- host->mmc->f_max = pdata->hclk;
- host->mmc->f_min = pdata->hclk / 512;
-
- ret = tmio_mmc_host_probe(host);
- if (ret)
- goto host_free;
-
- ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq,
- IRQF_TRIGGER_FALLING,
- dev_name(&pdev->dev), host);
- if (ret)
- goto host_remove;
-
- pr_info("%s at 0x%p irq %d\n", mmc_hostname(host->mmc), host->ctl, irq);
-
- return 0;
-
-host_remove:
- tmio_mmc_host_remove(host);
-host_free:
- tmio_mmc_host_free(host);
-cell_disable:
- if (cell->disable)
- cell->disable(pdev);
-out:
- return ret;
-}
-
-static int tmio_mmc_remove(struct platform_device *pdev)
-{
- const struct mfd_cell *cell = mfd_get_cell(pdev);
- struct tmio_mmc_host *host = platform_get_drvdata(pdev);
-
- tmio_mmc_host_remove(host);
- if (cell->disable)
- cell->disable(pdev);
-
- return 0;
-}
-
-/* ------------------- device registration ----------------------- */
-
-static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tmio_mmc_suspend, tmio_mmc_resume)
- SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
- tmio_mmc_host_runtime_resume, NULL)
-};
-
-static struct platform_driver tmio_mmc_driver = {
- .driver = {
- .name = "tmio-mmc",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &tmio_mmc_dev_pm_ops,
- },
- .probe = tmio_mmc_probe,
- .remove = tmio_mmc_remove,
-};
-
-module_platform_driver(tmio_mmc_driver);
-
-MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver");
-MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:tmio-mmc");
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index f936aad945ce..b9de03325c58 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -16,11 +16,13 @@
#include <linux/dmaengine.h>
#include <linux/highmem.h>
+#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
@@ -42,6 +44,8 @@
#define CTL_DMA_ENABLE 0xd8
#define CTL_RESET_SD 0xe0
#define CTL_VERSION 0xe2
+#define CTL_SDIF_MODE 0xe6 /* only known on R-Car 2+ */
+#define CTL_SD_STATUS 0xf2 /* only known on RZ/{G2L,G3E,V2H} */
/* Definitions for values the CTL_STOP_INTERNAL_ACTION register can take */
#define TMIO_STOP_STP BIT(0)
@@ -98,6 +102,13 @@
/* Definitions for values the CTL_DMA_ENABLE register can take */
#define DMA_ENABLE_DMASDRW BIT(1)
+/* Definitions for values the CTL_SDIF_MODE register can take */
+#define SDIF_MODE_HS400 BIT(0) /* only known on R-Car 2+ */
+
+/* Definitions for values the CTL_SD_STATUS register can take */
+#define SD_STATUS_PWEN BIT(0) /* only known on RZ/{G3E,V2H} */
+#define SD_STATUS_IOVS BIT(16) /* only known on RZ/{G3E,V2H} */
+
/* Define some IRQ masks */
/* This is the mask used at reset by the chip */
#define TMIO_MASK_ALL 0x837f031d
@@ -124,6 +135,7 @@ struct tmio_mmc_dma_ops {
/* optional */
void (*end)(struct tmio_mmc_host *host); /* held host->lock */
+ bool (*dma_irq)(struct tmio_mmc_host *host);
};
struct tmio_mmc_host {
@@ -134,9 +146,6 @@ struct tmio_mmc_host {
struct mmc_host *mmc;
struct mmc_host_ops ops;
- /* Callbacks for clock / power control */
- void (*set_pwr)(struct platform_device *host, int state);
-
/* pio related stuff */
struct scatterlist *sg_ptr;
struct scatterlist *sg_orig;
@@ -151,7 +160,7 @@ struct tmio_mmc_host {
bool dma_on;
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
- struct tasklet_struct dma_issue;
+ struct work_struct dma_issue;
struct scatterlist bounce_sg;
u8 *bounce_buf;
@@ -181,21 +190,17 @@ struct tmio_mmc_host {
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
- void (*reset)(struct tmio_mmc_host *host);
+ void (*reset)(struct tmio_mmc_host *host, bool preserve);
bool (*check_retune)(struct tmio_mmc_host *host, struct mmc_request *mrq);
void (*fixup_request)(struct tmio_mmc_host *host, struct mmc_request *mrq);
unsigned int (*get_timeout_cycles)(struct tmio_mmc_host *host);
-
- void (*prepare_hs400_tuning)(struct tmio_mmc_host *host);
- void (*hs400_downgrade)(struct tmio_mmc_host *host);
- void (*hs400_complete)(struct tmio_mmc_host *host);
+ void (*sdio_irq)(struct tmio_mmc_host *host);
const struct tmio_mmc_dma_ops *dma_ops;
};
struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
struct tmio_mmc_data *pdata);
-void tmio_mmc_host_free(struct tmio_mmc_host *host);
int tmio_mmc_host_probe(struct tmio_mmc_host *host);
void tmio_mmc_host_remove(struct tmio_mmc_host *host);
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
@@ -204,24 +209,8 @@ void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
irqreturn_t tmio_mmc_irq(int irq, void *devid);
-static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
- unsigned long *flags)
-{
- local_irq_save(*flags);
- return kmap_atomic(sg_page(sg)) + sg->offset;
-}
-
-static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
- unsigned long *flags, void *virt)
-{
- kunmap_atomic(virt - sg->offset);
- local_irq_restore(*flags);
-}
-
-#ifdef CONFIG_PM
int tmio_mmc_host_runtime_suspend(struct device *dev);
int tmio_mmc_host_runtime_resume(struct device *dev);
-#endif
static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
@@ -241,12 +230,31 @@ static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host,
ioread16(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
+static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
+{
+ return ioread32(host->ctl + (addr << host->bus_shift));
+}
+
static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr,
u32 *buf, int count)
{
ioread32_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
+#ifdef CONFIG_64BIT
+static inline void sd_ctrl_read64_rep(struct tmio_mmc_host *host, int addr,
+ u64 *buf, int count)
+{
+ readsq(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static inline void sd_ctrl_write64_rep(struct tmio_mmc_host *host, int addr,
+ const u64 *buf, int count)
+{
+ writesq(host->ctl + (addr << host->bus_shift), buf, count);
+}
+#endif
+
static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
u16 val)
{
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 7dfc26f48c18..775e0d9353d5 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -31,13 +31,14 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/mfd/tmio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pagemap.h>
+#include <linux/platform_data/tmio.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
@@ -159,7 +160,6 @@ static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
host->sdio_irq_enabled = false;
- pm_runtime_mark_last_busy(mmc_dev(mmc));
pm_runtime_put_autosuspend(mmc_dev(mmc));
}
}
@@ -179,8 +179,17 @@ static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}
-static void tmio_mmc_reset(struct tmio_mmc_host *host)
+static void tmio_mmc_reset(struct tmio_mmc_host *host, bool preserve)
{
+ u16 card_opt, clk_ctrl, sdif_mode;
+
+ if (preserve) {
+ card_opt = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT);
+ clk_ctrl = sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL);
+ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
+ sdif_mode = sd_ctrl_read16(host, CTL_SDIF_MODE);
+ }
+
/* FIXME - should we set stop clock reg here */
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
usleep_range(10000, 11000);
@@ -190,11 +199,15 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
tmio_mmc_abort_dma(host);
if (host->reset)
- host->reset(host);
+ host->reset(host, preserve);
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
host->sdcard_irq_mask = host->sdcard_irq_mask_all;
+ if (host->native_hotplug)
+ tmio_mmc_enable_mmc_irqs(host,
+ TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
+
tmio_mmc_set_bus_width(host, host->mmc->ios.bus_width);
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
@@ -202,6 +215,13 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
}
+ if (preserve) {
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, card_opt);
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk_ctrl);
+ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
+ sd_ctrl_write16(host, CTL_SDIF_MODE, sdif_mode);
+ }
+
if (host->mmc->card)
mmc_retune_needed(host->mmc);
}
@@ -239,12 +259,14 @@ static void tmio_mmc_reset_work(struct work_struct *work)
else
mrq->cmd->error = -ETIMEDOUT;
+ /* No new calls yet, but disallow concurrent tmio_mmc_done_work() */
+ host->mrq = ERR_PTR(-EBUSY);
host->cmd = NULL;
host->data = NULL;
spin_unlock_irqrestore(&host->lock, flags);
- tmio_mmc_reset(host);
+ tmio_mmc_reset(host, true);
/* Ready for new calls */
host->mrq = NULL;
@@ -274,7 +296,6 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host,
switch (mmc_resp_type(cmd)) {
case MMC_RSP_NONE: c |= RESP_NONE; break;
case MMC_RSP_R1:
- case MMC_RSP_R1_NO_CRC:
c |= RESP_R1; break;
case MMC_RSP_R1B: c |= RESP_R1B; break;
case MMC_RSP_R2: c |= RESP_R2; break;
@@ -328,6 +349,39 @@ static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
/*
* Transfer the data
*/
+#ifdef CONFIG_64BIT
+ if (host->pdata->flags & TMIO_MMC_64BIT_DATA_PORT) {
+ u64 *buf64 = (u64 *)buf;
+ u64 data = 0;
+
+ if (count >= 8) {
+ if (is_read)
+ sd_ctrl_read64_rep(host, CTL_SD_DATA_PORT,
+ buf64, count >> 3);
+ else
+ sd_ctrl_write64_rep(host, CTL_SD_DATA_PORT,
+ buf64, count >> 3);
+ }
+
+ /* if count was multiple of 8 */
+ if (!(count & 0x7))
+ return;
+
+ buf64 += count >> 3;
+ count %= 8;
+
+ if (is_read) {
+ sd_ctrl_read64_rep(host, CTL_SD_DATA_PORT, &data, 1);
+ memcpy(buf64, &data, count);
+ } else {
+ memcpy(&data, buf64, count);
+ sd_ctrl_write64_rep(host, CTL_SD_DATA_PORT, &data, 1);
+ }
+
+ return;
+ }
+#endif
+
if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
u32 data = 0;
u32 *buf32 = (u32 *)buf;
@@ -392,7 +446,6 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
void *sg_virt;
unsigned short *buf;
unsigned int count;
- unsigned long flags;
if (host->dma_on) {
pr_err("PIO IRQ in DMA mode!\n");
@@ -402,8 +455,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
return;
}
- sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
- buf = (unsigned short *)(sg_virt + host->sg_off);
+ sg_virt = kmap_local_page(sg_page(host->sg_ptr));
+ buf = (unsigned short *)(sg_virt + host->sg_ptr->offset + host->sg_off);
count = host->sg_ptr->length - host->sg_off;
if (count > data->blksz)
@@ -417,7 +470,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
host->sg_off += count;
- tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);
+ kunmap_local(sg_virt);
if (host->sg_off == host->sg_ptr->length)
tmio_mmc_next_sg(host);
@@ -426,11 +479,11 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
if (host->sg_ptr == &host->bounce_sg) {
- unsigned long flags;
- void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
+ void *sg_virt = kmap_local_page(sg_page(host->sg_orig));
- memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
- tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
+ memcpy(sg_virt + host->sg_orig->offset, host->bounce_buf,
+ host->bounce_sg.length);
+ kunmap_local(sg_virt);
}
}
@@ -587,7 +640,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
} else {
tmio_mmc_disable_mmc_irqs(host,
TMIO_MASK_READOP);
- tasklet_schedule(&host->dma_issue);
+ queue_work(system_bh_wq, &host->dma_issue);
}
} else {
if (!host->dma_on) {
@@ -595,7 +648,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
} else {
tmio_mmc_disable_mmc_irqs(host,
TMIO_MASK_WRITEOP);
- tasklet_schedule(&host->dma_issue);
+ queue_work(system_bh_wq, &host->dma_issue);
}
}
} else {
@@ -650,6 +703,9 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
return true;
}
+ if (host->dma_ops && host->dma_ops->dma_irq && host->dma_ops->dma_irq(host))
+ return true;
+
return false;
}
@@ -672,8 +728,11 @@ static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);
- if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
+ if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ) {
+ if (host->sdio_irq)
+ host->sdio_irq(host);
mmc_signal_sdio_irq(mmc);
+ }
return ireg;
}
@@ -856,9 +915,6 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
/* .set_ios() is returning void, so, no chance to report an error */
- if (host->set_pwr)
- host->set_pwr(host->pdev, 1);
-
if (!IS_ERR(mmc->supply.vmmc)) {
ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
/*
@@ -873,8 +929,8 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
* It seems, VccQ should be switched on after Vcc, this is also what the
* omap_hsmmc.c driver does.
*/
- if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
- ret = regulator_enable(mmc->supply.vqmmc);
+ if (!ret) {
+ ret = mmc_regulator_enable_vqmmc(mmc);
usleep_range(200, 300);
}
@@ -887,14 +943,10 @@ static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
struct mmc_host *mmc = host->mmc;
- if (!IS_ERR(mmc->supply.vqmmc))
- regulator_disable(mmc->supply.vqmmc);
+ mmc_regulator_disable_vqmmc(mmc);
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
-
- if (host->set_pwr)
- host->set_pwr(host->pdev, 0);
}
static unsigned int tmio_mmc_get_timeout_cycles(struct tmio_mmc_host *host)
@@ -948,6 +1000,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
return;
}
+ /* Disallow new mrqs and work handlers to run */
host->mrq = ERR_PTR(-EBUSY);
spin_unlock_irqrestore(&host->lock, flags);
@@ -957,7 +1010,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
tmio_mmc_power_off(host);
/* For R-Car Gen2+, we need to reset SDHI specific SCC */
if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
- host->reset(host);
+ tmio_mmc_reset(host, false);
+
host->set_clock(host, 0);
break;
case MMC_POWER_UP:
@@ -981,8 +1035,9 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
"%s.%d: IOS interrupted: clk %u, mode %u",
current->comm, task_pid_nr(current),
ios->clock, ios->power_mode);
- host->mrq = NULL;
+ /* Ready for new mrqs */
+ host->mrq = NULL;
host->clk_cache = ios->clock;
mutex_unlock(&host->ios_lock);
@@ -1061,7 +1116,7 @@ static void tmio_mmc_of_parse(struct platform_device *pdev,
* For new platforms, please use "disable-wp" instead of
* "toshiba,mmc-wrprotect-disable"
*/
- if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
+ if (of_property_read_bool(np, "toshiba,mmc-wrprotect-disable"))
mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
}
@@ -1077,7 +1132,7 @@ struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
if (IS_ERR(ctl))
return ERR_CAST(ctl);
- mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host));
if (!mmc)
return ERR_PTR(-ENOMEM);
@@ -1090,29 +1145,17 @@ struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
mmc->ops = &host->ops;
ret = mmc_of_parse(host->mmc);
- if (ret) {
- host = ERR_PTR(ret);
- goto free;
- }
+ if (ret)
+ return ERR_PTR(ret);
tmio_mmc_of_parse(pdev, mmc);
platform_set_drvdata(pdev, host);
return host;
-free:
- mmc_free_host(mmc);
-
- return host;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_alloc);
-void tmio_mmc_host_free(struct tmio_mmc_host *host)
-{
- mmc_free_host(host->mmc);
-}
-EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
-
int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
{
struct platform_device *pdev = _host->pdev;
@@ -1133,8 +1176,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
if (pdata->flags & TMIO_MMC_USE_BUSY_TIMEOUT && !_host->get_timeout_cycles)
_host->get_timeout_cycles = tmio_mmc_get_timeout_cycles;
- _host->set_pwr = pdata->set_pwr;
-
ret = tmio_mmc_init_ocr(_host);
if (ret < 0)
return ret;
@@ -1158,13 +1199,14 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
dma_max_mapping_size(&pdev->dev));
mmc->max_seg_size = mmc->max_req_size;
- if (mmc_can_gpio_ro(mmc))
+ if (mmc_host_can_gpio_ro(mmc))
_host->ops.get_ro = mmc_gpio_get_ro;
- if (mmc_can_gpio_cd(mmc))
+ if (mmc_host_can_gpio_cd(mmc))
_host->ops.get_cd = mmc_gpio_get_cd;
- _host->native_hotplug = !(mmc_can_gpio_cd(mmc) ||
+ /* must be set before tmio_mmc_reset() */
+ _host->native_hotplug = !(mmc_host_can_gpio_cd(mmc) ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
!mmc_card_is_removable(mmc));
@@ -1183,11 +1225,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
_host->sdcard_irq_mask_all = TMIO_MASK_ALL;
_host->set_clock(_host, 0);
- tmio_mmc_reset(_host);
-
- if (_host->native_hotplug)
- tmio_mmc_enable_mmc_irqs(_host,
- TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
+ tmio_mmc_reset(_host, false);
spin_lock_init(&_host->lock);
mutex_init(&_host->ios_lock);
@@ -1283,15 +1321,11 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
struct tmio_mmc_host *host = dev_get_drvdata(dev);
tmio_mmc_clk_enable(host);
- tmio_mmc_reset(host);
+ tmio_mmc_reset(host, false);
if (host->clk_cache)
host->set_clock(host, host->clk_cache);
- if (host->native_hotplug)
- tmio_mmc_enable_mmc_irqs(host,
- TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
-
tmio_mmc_enable_dma(host, true);
return 0;
@@ -1299,4 +1333,5 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_resume);
#endif
+MODULE_DESCRIPTION("TMIO MMC core driver");
MODULE_LICENSE("GPL v2");
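[Illustrative note, not part of the patch] The tmio and tifm hunks above replace kmap_atomic()/kunmap_atomic() (and the local_irq_save()/restore() wrapped around the old helpers) with kmap_local_page()/kunmap_local(), which do not disable interrupts. A generic sketch of the PIO copy pattern with hypothetical names:

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Copy out of an SG segment; assumes the copy stays within the mapped page. */
static void foo_copy_from_sg(struct scatterlist *sg, void *dst, size_t len)
{
	/* map the page, then add the intra-page offset by hand */
	void *virt = kmap_local_page(sg_page(sg));

	memcpy(dst, virt + sg->offset, len);
	kunmap_local(virt);	/* pass back the pointer kmap_local_page() returned */
}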
diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c
index 8d037c2071ab..aa5d2511a62b 100644
--- a/drivers/mmc/host/toshsd.c
+++ b/drivers/mmc/host/toshsd.c
@@ -567,7 +567,6 @@ static void toshsd_powerdown(struct toshsd_host *host)
pci_write_config_byte(host->pdev, SD_PCICFG_CLKSTOP, 0);
}
-#ifdef CONFIG_PM_SLEEP
static int toshsd_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -599,7 +598,6 @@ static int toshsd_pm_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -612,7 +610,7 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- mmc = mmc_alloc_host(sizeof(struct toshsd_host), &pdev->dev);
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host));
if (!mmc) {
ret = -ENOMEM;
goto err;
@@ -651,7 +649,9 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto unmap;
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto free_irq;
base = pci_resource_start(pdev, 0);
dev_dbg(&pdev->dev, "MMIO %pa, IRQ %d\n", &base, pdev->irq);
@@ -660,12 +660,13 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+free_irq:
+ free_irq(pdev->irq, host);
unmap:
pci_iounmap(pdev, host->ioaddr);
release:
pci_release_regions(pdev);
free:
- mmc_free_host(mmc);
pci_set_drvdata(pdev, NULL);
err:
pci_disable_device(pdev);
@@ -681,21 +682,18 @@ static void toshsd_remove(struct pci_dev *pdev)
free_irq(pdev->irq, host);
pci_iounmap(pdev, host->ioaddr);
pci_release_regions(pdev);
- mmc_free_host(host->mmc);
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
}
-static const struct dev_pm_ops toshsd_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(toshsd_pm_suspend, toshsd_pm_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(toshsd_pm_ops, toshsd_pm_suspend, toshsd_pm_resume);
static struct pci_driver toshsd_driver = {
.name = DRIVER_NAME,
.id_table = pci_ids,
.probe = toshsd_probe,
.remove = toshsd_remove,
- .driver.pm = &toshsd_pm_ops,
+ .driver.pm = pm_sleep_ptr(&toshsd_pm_ops),
};
module_pci_driver(toshsd_driver);
diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
index ccbf9885a52b..1eae2f4b6c1f 100644
--- a/drivers/mmc/host/uniphier-sd.c
+++ b/drivers/mmc/host/uniphier-sd.c
@@ -8,13 +8,14 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <linux/mfd/tmio.h>
+#include <linux/mfd/syscon.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/platform_data/tmio.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include "tmio_mmc.h"
@@ -48,6 +49,12 @@
#define UNIPHIER_SD_DMA_ADDR_L 0x440
#define UNIPHIER_SD_DMA_ADDR_H 0x444
+/* SD control */
+#define UNIPHIER_SDCTRL_CHOFFSET 0x200
+#define UNIPHIER_SDCTRL_MODE 0x30
+#define UNIPHIER_SDCTRL_MODE_UHS1MOD BIT(15)
+#define UNIPHIER_SDCTRL_MODE_SDRSEL BIT(14)
+
/*
* IP is extended to support various features: built-in DMA engine,
* 1/1024 divisor, etc.
@@ -66,6 +73,8 @@ struct uniphier_sd_priv {
struct reset_control *rst_hw;
struct dma_chan *chan;
enum dma_data_direction dma_dir;
+ struct regmap *sdctrl_regmap;
+ u32 sdctrl_ch;
unsigned long clk_rate;
unsigned long caps;
};
@@ -81,9 +90,9 @@ static void uniphier_sd_dma_endisable(struct tmio_mmc_host *host, int enable)
}
/* external DMA engine */
-static void uniphier_sd_external_dma_issue(struct tasklet_struct *t)
+static void uniphier_sd_external_dma_issue(struct work_struct *t)
{
- struct tmio_mmc_host *host = from_tasklet(host, t, dma_issue);
+ struct tmio_mmc_host *host = from_work(host, t, dma_issue);
struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
uniphier_sd_dma_endisable(host, 1);
@@ -190,7 +199,7 @@ static void uniphier_sd_external_dma_request(struct tmio_mmc_host *host,
host->chan_rx = chan;
host->chan_tx = chan;
- tasklet_setup(&host->dma_issue, uniphier_sd_external_dma_issue);
+ INIT_WORK(&host->dma_issue, uniphier_sd_external_dma_issue);
}
static void uniphier_sd_external_dma_release(struct tmio_mmc_host *host)
@@ -227,9 +236,9 @@ static const struct tmio_mmc_dma_ops uniphier_sd_external_dma_ops = {
.dataend = uniphier_sd_external_dma_dataend,
};
-static void uniphier_sd_internal_dma_issue(struct tasklet_struct *t)
+static void uniphier_sd_internal_dma_issue(struct work_struct *t)
{
- struct tmio_mmc_host *host = from_tasklet(host, t, dma_issue);
+ struct tmio_mmc_host *host = from_work(host, t, dma_issue);
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
@@ -308,7 +317,7 @@ static void uniphier_sd_internal_dma_request(struct tmio_mmc_host *host,
host->chan_tx = (void *)0xdeadbeaf;
- tasklet_setup(&host->dma_issue, uniphier_sd_internal_dma_issue);
+ INIT_WORK(&host->dma_issue, uniphier_sd_internal_dma_issue);
}
static void uniphier_sd_internal_dma_release(struct tmio_mmc_host *host)
@@ -420,6 +429,42 @@ static void uniphier_sd_hw_reset(struct mmc_host *mmc)
usleep_range(300, 1000);
}
+static void uniphier_sd_speed_switch(struct tmio_mmc_host *host)
+{
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+ unsigned int offset;
+ u32 val = 0;
+
+ if (!(host->mmc->caps & MMC_CAP_UHS))
+ return;
+
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR50 ||
+ host->mmc->ios.timing == MMC_TIMING_UHS_SDR104)
+ val = UNIPHIER_SDCTRL_MODE_SDRSEL;
+
+ offset = UNIPHIER_SDCTRL_CHOFFSET * priv->sdctrl_ch
+ + UNIPHIER_SDCTRL_MODE;
+ regmap_write_bits(priv->sdctrl_regmap, offset,
+ UNIPHIER_SDCTRL_MODE_SDRSEL, val);
+}
+
+static void uniphier_sd_uhs_enable(struct tmio_mmc_host *host, bool uhs_en)
+{
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+ unsigned int offset;
+ u32 val;
+
+ if (!(host->mmc->caps & MMC_CAP_UHS))
+ return;
+
+ val = (uhs_en) ? UNIPHIER_SDCTRL_MODE_UHS1MOD : 0;
+
+ offset = UNIPHIER_SDCTRL_CHOFFSET * priv->sdctrl_ch
+ + UNIPHIER_SDCTRL_MODE;
+ regmap_write_bits(priv->sdctrl_regmap, offset,
+ UNIPHIER_SDCTRL_MODE_UHS1MOD, val);
+}
+
static void uniphier_sd_set_clock(struct tmio_mmc_host *host,
unsigned int clock)
{
@@ -433,6 +478,8 @@ static void uniphier_sd_set_clock(struct tmio_mmc_host *host,
tmp &= ~CLK_CTL_SCLKEN;
writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
+ uniphier_sd_speed_switch(host);
+
if (clock == 0)
return;
@@ -500,14 +547,17 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
struct pinctrl_state *pinstate = NULL;
u32 val, tmp;
+ bool uhs_en;
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
val = UNIPHIER_SD_VOLT_330;
+ uhs_en = false;
break;
case MMC_SIGNAL_VOLTAGE_180:
val = UNIPHIER_SD_VOLT_180;
pinstate = priv->pinstate_uhs;
+ uhs_en = true;
break;
default:
return -ENOTSUPP;
@@ -523,12 +573,19 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
else
pinctrl_select_default_state(mmc_dev(mmc));
+ uniphier_sd_uhs_enable(host, uhs_en);
+
return 0;
}
-static int uniphier_sd_uhs_init(struct tmio_mmc_host *host,
- struct uniphier_sd_priv *priv)
+static int uniphier_sd_uhs_init(struct tmio_mmc_host *host)
{
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+ struct device *dev = &host->pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ int ret;
+
priv->pinctrl = devm_pinctrl_get(mmc_dev(host->mmc));
if (IS_ERR(priv->pinctrl))
return PTR_ERR(priv->pinctrl);
@@ -537,8 +594,20 @@ static int uniphier_sd_uhs_init(struct tmio_mmc_host *host,
if (IS_ERR(priv->pinstate_uhs))
return PTR_ERR(priv->pinstate_uhs);
- host->ops.start_signal_voltage_switch =
- uniphier_sd_start_signal_voltage_switch;
+ ret = of_parse_phandle_with_fixed_args(np,
+ "socionext,syscon-uhs-mode",
+ 1, 0, &args);
+ if (ret) {
+ dev_err(dev, "Can't get syscon-uhs-mode property\n");
+ return ret;
+ }
+ priv->sdctrl_regmap = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
+ if (IS_ERR(priv->sdctrl_regmap)) {
+ dev_err(dev, "Can't map syscon-uhs-mode\n");
+ return PTR_ERR(priv->sdctrl_regmap);
+ }
+ priv->sdctrl_ch = args.args[0];
return 0;
}
@@ -594,19 +663,21 @@ static int uniphier_sd_probe(struct platform_device *pdev)
priv->rst_hw = devm_reset_control_get_exclusive(dev, "hw");
if (IS_ERR(priv->rst_hw)) {
dev_err(dev, "failed to get hw reset\n");
- ret = PTR_ERR(priv->rst_hw);
- goto free_host;
+ return PTR_ERR(priv->rst_hw);
}
- host->ops.hw_reset = uniphier_sd_hw_reset;
+ host->ops.card_hw_reset = uniphier_sd_hw_reset;
}
if (host->mmc->caps & MMC_CAP_UHS) {
- ret = uniphier_sd_uhs_init(host, priv);
+ ret = uniphier_sd_uhs_init(host);
if (ret) {
dev_warn(dev,
"failed to setup UHS (error %d). Disabling UHS.",
ret);
host->mmc->caps &= ~MMC_CAP_UHS;
+ } else {
+ host->ops.start_signal_voltage_switch =
+ uniphier_sd_start_signal_voltage_switch;
}
}
@@ -622,7 +693,7 @@ static int uniphier_sd_probe(struct platform_device *pdev)
ret = uniphier_sd_clk_enable(host);
if (ret)
- goto free_host;
+ return ret;
uniphier_sd_host_init(host);
@@ -633,36 +704,31 @@ static int uniphier_sd_probe(struct platform_device *pdev)
tmio_data->max_segs = 1;
tmio_data->max_blk_count = U16_MAX;
- ret = tmio_mmc_host_probe(host);
- if (ret)
- goto disable_clk;
+ sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, TMIO_MASK_ALL);
ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
dev_name(dev), host);
if (ret)
- goto remove_host;
+ goto disable_clk;
+
+ ret = tmio_mmc_host_probe(host);
+ if (ret)
+ goto disable_clk;
return 0;
-remove_host:
- tmio_mmc_host_remove(host);
disable_clk:
uniphier_sd_clk_disable(host);
-free_host:
- tmio_mmc_host_free(host);
return ret;
}
-static int uniphier_sd_remove(struct platform_device *pdev)
+static void uniphier_sd_remove(struct platform_device *pdev)
{
struct tmio_mmc_host *host = platform_get_drvdata(pdev);
tmio_mmc_host_remove(host);
uniphier_sd_clk_disable(host);
- tmio_mmc_host_free(host);
-
- return 0;
}
static const struct of_device_id uniphier_sd_match[] = {
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index b9b79b1089a0..3bccf800339b 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -323,7 +323,7 @@ static void usdhi6_blk_bounce(struct usdhi6_host *host,
host->head_pg.page = host->pg.page;
host->head_pg.mapped = host->pg.mapped;
- host->pg.page = nth_page(host->pg.page, 1);
+ host->pg.page = host->pg.page + 1;
host->pg.mapped = kmap(host->pg.page);
host->blk_page = host->bounce_buf;
@@ -503,7 +503,7 @@ static void usdhi6_sg_advance(struct usdhi6_host *host)
/* We cannot get here after crossing a page border */
/* Next page in the same SG */
- host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
+ host->pg.page = sg_page(host->sg) + host->page_idx;
host->pg.mapped = kmap(host->pg.page);
host->blk_page = host->pg.mapped;
@@ -631,9 +631,9 @@ static void usdhi6_dma_kill(struct usdhi6_host *host)
__func__, data->sg_len, data->blocks, data->blksz);
/* Abort DMA */
if (data->flags & MMC_DATA_READ)
- dmaengine_terminate_all(host->chan_rx);
+ dmaengine_terminate_sync(host->chan_rx);
else
- dmaengine_terminate_all(host->chan_tx);
+ dmaengine_terminate_sync(host->chan_tx);
}
static void usdhi6_dma_check_error(struct usdhi6_host *host)
@@ -1186,6 +1186,15 @@ static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
return ret;
}
+static int usdhi6_card_busy(struct mmc_host *mmc)
+{
+ struct usdhi6_host *host = mmc_priv(mmc);
+ u32 tmp = usdhi6_read(host, USDHI6_SD_INFO2);
+
+ /* Card is busy if it is pulling dat[0] low */
+ return !(tmp & USDHI6_SD_INFO2_SDDAT0);
+}
+
static const struct mmc_host_ops usdhi6_ops = {
.request = usdhi6_request,
.set_ios = usdhi6_set_ios,
@@ -1193,6 +1202,7 @@ static const struct mmc_host_ops usdhi6_ops = {
.get_ro = usdhi6_get_ro,
.enable_sdio_irq = usdhi6_enable_sdio_irq,
.start_signal_voltage_switch = usdhi6_sig_volt_switch,
+ .card_busy = usdhi6_card_busy,
};
/* State machine handlers */
@@ -1747,20 +1757,22 @@ static int usdhi6_probe(struct platform_device *pdev)
irq_cd = platform_get_irq_byname(pdev, "card detect");
irq_sd = platform_get_irq_byname(pdev, "data");
irq_sdio = platform_get_irq_byname(pdev, "SDIO");
- if (irq_sd < 0 || irq_sdio < 0)
- return -ENODEV;
+ if (irq_sd < 0)
+ return irq_sd;
+ if (irq_sdio < 0)
+ return irq_sdio;
- mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev);
+ mmc = devm_mmc_alloc_host(dev, sizeof(*host));
if (!mmc)
return -ENOMEM;
ret = mmc_regulator_get_supply(mmc);
if (ret)
- goto e_free_mmc;
+ return ret;
ret = mmc_of_parse(mmc);
if (ret < 0)
- goto e_free_mmc;
+ return ret;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -1773,31 +1785,24 @@ static int usdhi6_probe(struct platform_device *pdev)
mmc->max_busy_timeout = USDHI6_REQ_TIMEOUT_MS;
host->pinctrl = devm_pinctrl_get(&pdev->dev);
- if (IS_ERR(host->pinctrl)) {
- ret = PTR_ERR(host->pinctrl);
- goto e_free_mmc;
- }
+ if (IS_ERR(host->pinctrl))
+ return PTR_ERR(host->pinctrl);
host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(host->base)) {
- ret = PTR_ERR(host->base);
- goto e_free_mmc;
- }
+ host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(host->base))
+ return PTR_ERR(host->base);
host->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(host->clk)) {
- ret = PTR_ERR(host->clk);
- goto e_free_mmc;
- }
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
host->imclk = clk_get_rate(host->clk);
ret = clk_prepare_enable(host->clk);
if (ret < 0)
- goto e_free_mmc;
+ return ret;
version = usdhi6_read(host, USDHI6_VERSION);
if ((version & 0xfff) != 0xa0d) {
@@ -1867,13 +1872,10 @@ e_release_dma:
usdhi6_dma_release(host);
e_clk_off:
clk_disable_unprepare(host->clk);
-e_free_mmc:
- mmc_free_host(mmc);
-
return ret;
}
-static int usdhi6_remove(struct platform_device *pdev)
+static void usdhi6_remove(struct platform_device *pdev)
{
struct usdhi6_host *host = platform_get_drvdata(pdev);
@@ -1883,9 +1885,6 @@ static int usdhi6_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&host->timeout_work);
usdhi6_dma_release(host);
clk_disable_unprepare(host->clk);
- mmc_free_host(host->mmc);
-
- return 0;
}
static struct platform_driver usdhi6_driver = {
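[Illustrative note, not part of the patch] usdhi6rol0.c above — like tifm_sd, toshsd, tmio, ushc, via-sdmmc and vub300 in this series — moves from mmc_alloc_host() to devm_mmc_alloc_host(), so the mmc_free_host() calls and the dedicated error labels disappear. A sketch of the resulting probe shape, with invented names:

#include <linux/mmc/host.h>
#include <linux/platform_device.h>

struct foo_host {
	struct mmc_host *mmc;
};

static int foo_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct foo_host *host;
	int ret;

	/* freed automatically on probe failure or device removal */
	mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host));
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	ret = mmc_of_parse(mmc);
	if (ret)
		return ret;	/* no mmc_free_host() on the error path */

	return mmc_add_host(mmc);
}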
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index 9a6358fd9512..2b7456e942f7 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -404,8 +404,6 @@ static void ushc_clean_up(struct ushc_data *ushc)
kfree(ushc->int_data);
kfree(ushc->cbw);
kfree(ushc->csw);
-
- mmc_free_host(ushc->mmc);
}
static const struct mmc_host_ops ushc_ops = {
@@ -425,7 +423,7 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id
if (intf->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
- mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
+ mmc = devm_mmc_alloc_host(&intf->dev, sizeof(*ushc));
if (mmc == NULL)
return -ENOMEM;
ushc = mmc_priv(mmc);
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index c32df5530b94..c628b3bbfd7a 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/mmc/host.h>
+#include <linux/workqueue.h>
#define DRV_NAME "via_sdmmc"
@@ -307,7 +308,7 @@ struct via_crdr_mmc_host {
struct sdhcreg pm_sdhc_reg;
struct work_struct carddet_work;
- struct tasklet_struct finish_tasklet;
+ struct work_struct finish_bh_work;
struct timer_list timer;
spinlock_t lock;
@@ -491,7 +492,7 @@ static void via_sdc_preparedata(struct via_crdr_mmc_host *host,
count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
((data->flags & MMC_DATA_READ) ?
- PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+ DMA_FROM_DEVICE : DMA_TO_DEVICE));
BUG_ON(count != 1);
via_set_ddma(host, sg_dma_address(data->sg), sg_dma_len(data->sg),
@@ -638,12 +639,12 @@ static void via_sdc_finish_data(struct via_crdr_mmc_host *host)
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
((data->flags & MMC_DATA_READ) ?
- PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+ DMA_FROM_DEVICE : DMA_TO_DEVICE));
if (data->stop)
via_sdc_send_command(host, data->stop);
else
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
}
static void via_sdc_finish_command(struct via_crdr_mmc_host *host)
@@ -653,7 +654,7 @@ static void via_sdc_finish_command(struct via_crdr_mmc_host *host)
host->cmd->error = 0;
if (!host->cmd->data)
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
host->cmd = NULL;
}
@@ -682,7 +683,7 @@ static void via_sdc_request(struct mmc_host *mmc, struct mmc_request *mrq)
status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
if (!(status & VIA_CRDR_SDSTS_SLOTG) || host->reject) {
host->mrq->cmd->error = -ENOMEDIUM;
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
} else {
via_sdc_send_command(host, mrq->cmd);
}
@@ -848,7 +849,7 @@ static void via_sdc_cmd_isr(struct via_crdr_mmc_host *host, u16 intmask)
host->cmd->error = -EILSEQ;
if (host->cmd->error)
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
else if (intmask & VIA_CRDR_SDSTS_CRD)
via_sdc_finish_command(host);
}
@@ -936,7 +937,7 @@ static void via_sdc_timeout(struct timer_list *t)
struct via_crdr_mmc_host *sdhost;
unsigned long flags;
- sdhost = from_timer(sdhost, t, timer);
+ sdhost = timer_container_of(sdhost, t, timer);
spin_lock_irqsave(&sdhost->lock, flags);
@@ -955,22 +956,22 @@ static void via_sdc_timeout(struct timer_list *t)
sdhost->cmd->error = -ETIMEDOUT;
else
sdhost->mrq->cmd->error = -ETIMEDOUT;
- tasklet_schedule(&sdhost->finish_tasklet);
+ queue_work(system_bh_wq, &sdhost->finish_bh_work);
}
}
spin_unlock_irqrestore(&sdhost->lock, flags);
}
-static void via_sdc_tasklet_finish(struct tasklet_struct *t)
+static void via_sdc_finish_bh_work(struct work_struct *t)
{
- struct via_crdr_mmc_host *host = from_tasklet(host, t, finish_tasklet);
+ struct via_crdr_mmc_host *host = from_work(host, t, finish_bh_work);
unsigned long flags;
struct mmc_request *mrq;
spin_lock_irqsave(&host->lock, flags);
- del_timer(&host->timer);
+ timer_delete(&host->timer);
mrq = host->mrq;
host->mrq = NULL;
host->cmd = NULL;
@@ -1005,7 +1006,7 @@ static void via_sdc_card_detect(struct work_struct *work)
pr_err("%s: Card removed during transfer!\n",
mmc_hostname(host->mmc));
host->mrq->cmd->error = -ENOMEDIUM;
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
}
spin_unlock_irqrestore(&host->lock, flags);
@@ -1051,7 +1052,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host)
INIT_WORK(&host->carddet_work, via_sdc_card_detect);
- tasklet_setup(&host->finish_tasklet, via_sdc_tasklet_finish);
+ INIT_WORK(&host->finish_bh_work, via_sdc_finish_bh_work);
addrbase = host->sdhc_mmiobase;
writel(0x0, addrbase + VIA_CRDR_SDINTMASK);
@@ -1099,7 +1100,7 @@ static int via_sd_probe(struct pci_dev *pcidev,
pci_write_config_byte(pcidev, VIA_CRDR_PCI_WORK_MODE, 0);
pci_write_config_byte(pcidev, VIA_CRDR_PCI_DBG_MODE, 0);
- mmc = mmc_alloc_host(sizeof(struct via_crdr_mmc_host), &pcidev->dev);
+ mmc = devm_mmc_alloc_host(&pcidev->dev, sizeof(*sdhost));
if (!mmc) {
ret = -ENOMEM;
goto release;
@@ -1114,7 +1115,7 @@ static int via_sd_probe(struct pci_dev *pcidev,
sdhost->mmiobase = ioremap(base, len);
if (!sdhost->mmiobase) {
ret = -ENOMEM;
- goto free_mmc_host;
+ goto release;
}
sdhost->sdhc_mmiobase =
@@ -1151,15 +1152,14 @@ static int via_sd_probe(struct pci_dev *pcidev,
pcidev->subsystem_device == 0x3891)
sdhost->quirks = VIA_CRDR_QUIRK_300MS_PWRDELAY;
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto unmap;
return 0;
unmap:
iounmap(sdhost->mmiobase);
-free_mmc_host:
- dev_set_drvdata(&pcidev->dev, NULL);
- mmc_free_host(mmc);
release:
pci_release_regions(pcidev);
disable:
@@ -1192,7 +1192,7 @@ static void via_sd_remove(struct pci_dev *pcidev)
sdhost->mrq->cmd->error = -ENOMEDIUM;
if (sdhost->mrq->stop)
sdhost->mrq->stop->error = -ENOMEDIUM;
- tasklet_schedule(&sdhost->finish_tasklet);
+ queue_work(system_bh_wq, &sdhost->finish_bh_work);
}
spin_unlock_irqrestore(&sdhost->lock, flags);
@@ -1200,9 +1200,9 @@ static void via_sd_remove(struct pci_dev *pcidev)
free_irq(pcidev->irq, sdhost);
- del_timer_sync(&sdhost->timer);
+ timer_delete_sync(&sdhost->timer);
- tasklet_kill(&sdhost->finish_tasklet);
+ cancel_work_sync(&sdhost->finish_bh_work);
/* switch off power */
gatt = readb(sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
@@ -1210,8 +1210,6 @@ static void via_sd_remove(struct pci_dev *pcidev)
writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
iounmap(sdhost->mmiobase);
- dev_set_drvdata(&pcidev->dev, NULL);
- mmc_free_host(sdhost->mmc);
pci_release_regions(pcidev);
pci_disable_device(pcidev);
@@ -1220,7 +1218,7 @@ static void via_sd_remove(struct pci_dev *pcidev)
pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}
-static void __maybe_unused via_init_sdc_pm(struct via_crdr_mmc_host *host)
+static void via_init_sdc_pm(struct via_crdr_mmc_host *host)
{
struct sdhcreg *pm_sdhcreg;
void __iomem *addrbase;
@@ -1254,7 +1252,7 @@ static void __maybe_unused via_init_sdc_pm(struct via_crdr_mmc_host *host)
via_print_sdchc(host);
}
-static int __maybe_unused via_sd_suspend(struct device *dev)
+static int via_sd_suspend(struct device *dev)
{
struct via_crdr_mmc_host *host;
unsigned long flags;
@@ -1271,7 +1269,7 @@ static int __maybe_unused via_sd_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused via_sd_resume(struct device *dev)
+static int via_sd_resume(struct device *dev)
{
struct via_crdr_mmc_host *sdhost;
u8 gatt;
@@ -1297,14 +1295,14 @@ static int __maybe_unused via_sd_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(via_sd_pm_ops, via_sd_suspend, via_sd_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(via_sd_pm_ops, via_sd_suspend, via_sd_resume);
static struct pci_driver via_sd_driver = {
.name = DRV_NAME,
.id_table = via_ids,
.probe = via_sd_probe,
.remove = via_sd_remove,
- .driver.pm = &via_sd_pm_ops,
+ .driver.pm = pm_sleep_ptr(&via_sd_pm_ops),
};
module_pci_driver(via_sd_driver);
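[Illustrative note, not part of the patch] toshsd and via-sdmmc above drop the CONFIG_PM_SLEEP guards and __maybe_unused annotations in favour of DEFINE_SIMPLE_DEV_PM_OPS() and pm_sleep_ptr(), the sleep-only counterpart of the pm_ptr() change in sunxi-mmc. A minimal sketch on a hypothetical PCI driver:

#include <linux/pci.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	return 0;	/* save controller state */
}

static int foo_resume(struct device *dev)
{
	return 0;	/* restore controller state */
}

/* the macro references the callbacks unconditionally, so no __maybe_unused */
static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_pci_driver = {
	.name = "foo",
	.driver.pm = pm_sleep_ptr(&foo_pm_ops),	/* NULL when !CONFIG_PM_SLEEP */
};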
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 4950d10d3a19..ff49d0770506 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -369,13 +369,11 @@ struct vub300_mmc_host {
static void vub300_delete(struct kref *kref)
{ /* kref callback - softirq */
struct vub300_mmc_host *vub300 = kref_to_vub300_mmc_host(kref);
- struct mmc_host *mmc = vub300->mmc;
usb_free_urb(vub300->command_out_urb);
vub300->command_out_urb = NULL;
usb_free_urb(vub300->command_res_urb);
vub300->command_res_urb = NULL;
usb_put_dev(vub300->udev);
- mmc_free_host(mmc);
/*
* and hence also frees vub300
* which is contained at the end of struct mmc
@@ -512,7 +510,7 @@ static void new_system_port_status(struct vub300_mmc_host *vub300)
vub300->card_present = 1;
vub300->bus_width = 0;
if (disable_offload_processing)
- strncpy(vub300->vub_name, "EMPTY Processing Disabled",
+ strscpy(vub300->vub_name, "EMPTY Processing Disabled",
sizeof(vub300->vub_name));
else
vub300->vub_name[0] = 0;
@@ -576,7 +574,7 @@ static void check_vub300_port_status(struct vub300_mmc_host *vub300)
GET_SYSTEM_PORT_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, 0x0000, &vub300->system_port_status,
- sizeof(vub300->system_port_status), HZ);
+ sizeof(vub300->system_port_status), 1000);
if (sizeof(vub300->system_port_status) == retval)
new_system_port_status(vub300);
}
@@ -740,8 +738,8 @@ static void vub300_deadwork_thread(struct work_struct *work)
static void vub300_inactivity_timer_expired(struct timer_list *t)
{ /* softirq */
- struct vub300_mmc_host *vub300 = from_timer(vub300, t,
- inactivity_timer);
+ struct vub300_mmc_host *vub300 = timer_container_of(vub300, t,
+ inactivity_timer);
if (!vub300->interface) {
kref_put(&vub300->kref, vub300_delete);
} else if (vub300->cmd) {
@@ -1180,8 +1178,8 @@ static void send_command(struct vub300_mmc_host *vub300)
*/
static void vub300_sg_timed_out(struct timer_list *t)
{
- struct vub300_mmc_host *vub300 = from_timer(vub300, t,
- sg_transfer_timer);
+ struct vub300_mmc_host *vub300 = timer_container_of(vub300, t,
+ sg_transfer_timer);
vub300->usb_timed_out = 1;
usb_sg_cancel(&vub300->sg_request);
usb_unlink_urb(vub300->command_out_urb);
@@ -1216,7 +1214,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
dev_err(&vub300->udev->dev,
"corrupt offload pseudocode in firmware %s\n",
vub300->vub_name);
- strncpy(vub300->vub_name, "corrupt offload pseudocode",
+ strscpy(vub300->vub_name, "corrupt offload pseudocode",
sizeof(vub300->vub_name));
return;
}
@@ -1241,7 +1239,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
SET_INTERRUPT_PSEUDOCODE,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0x0000, 0x0000,
- xfer_buffer, xfer_length, HZ);
+ xfer_buffer, xfer_length, 1000);
kfree(xfer_buffer);
if (retval < 0)
goto copy_error_message;
@@ -1250,7 +1248,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
"not enough memory for xfer buffer to send"
" INTERRUPT_PSEUDOCODE for %s %s\n", fw->data,
vub300->vub_name);
- strncpy(vub300->vub_name,
+ strscpy(vub300->vub_name,
"SDIO interrupt pseudocode download failed",
sizeof(vub300->vub_name));
return;
@@ -1259,7 +1257,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
dev_err(&vub300->udev->dev,
"corrupt interrupt pseudocode in firmware %s %s\n",
fw->data, vub300->vub_name);
- strncpy(vub300->vub_name, "corrupt interrupt pseudocode",
+ strscpy(vub300->vub_name, "corrupt interrupt pseudocode",
sizeof(vub300->vub_name));
return;
}
@@ -1284,7 +1282,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
SET_TRANSFER_PSEUDOCODE,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0x0000, 0x0000,
- xfer_buffer, xfer_length, HZ);
+ xfer_buffer, xfer_length, 1000);
kfree(xfer_buffer);
if (retval < 0)
goto copy_error_message;
@@ -1293,7 +1291,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
"not enough memory for xfer buffer to send"
" TRANSFER_PSEUDOCODE for %s %s\n", fw->data,
vub300->vub_name);
- strncpy(vub300->vub_name,
+ strscpy(vub300->vub_name,
"SDIO transfer pseudocode download failed",
sizeof(vub300->vub_name));
return;
@@ -1302,7 +1300,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
dev_err(&vub300->udev->dev,
"corrupt transfer pseudocode in firmware %s %s\n",
fw->data, vub300->vub_name);
- strncpy(vub300->vub_name, "corrupt transfer pseudocode",
+ strscpy(vub300->vub_name, "corrupt transfer pseudocode",
sizeof(vub300->vub_name));
return;
}
@@ -1336,15 +1334,13 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
dev_err(&vub300->udev->dev,
"corrupt dynamic registers in firmware %s\n",
vub300->vub_name);
- strncpy(vub300->vub_name, "corrupt dynamic registers",
+ strscpy(vub300->vub_name, "corrupt dynamic registers",
sizeof(vub300->vub_name));
return;
}
- return;
-
copy_error_message:
- strncpy(vub300->vub_name, "SDIO pseudocode download failed",
+ strscpy(vub300->vub_name, "SDIO pseudocode download failed",
sizeof(vub300->vub_name));
}
@@ -1372,11 +1368,11 @@ static void download_offload_pseudocode(struct vub300_mmc_host *vub300)
vub300->vub_name);
retval = request_firmware(&fw, vub300->vub_name, &card->dev);
if (retval < 0) {
- strncpy(vub300->vub_name, "vub_default.bin",
+ strscpy(vub300->vub_name, "vub_default.bin",
sizeof(vub300->vub_name));
retval = request_firmware(&fw, vub300->vub_name, &card->dev);
if (retval < 0) {
- strncpy(vub300->vub_name,
+ strscpy(vub300->vub_name,
"no SDIO offload firmware found",
sizeof(vub300->vub_name));
} else {
@@ -1454,7 +1450,7 @@ static int __command_read_data(struct vub300_mmc_host *vub300,
(linear_length / 16384));
add_timer(&vub300->sg_transfer_timer);
usb_sg_wait(&vub300->sg_request);
- del_timer(&vub300->sg_transfer_timer);
+ timer_delete(&vub300->sg_transfer_timer);
if (vub300->sg_request.status < 0) {
cmd->error = vub300->sg_request.status;
data->bytes_xfered = 0;
@@ -1574,7 +1570,7 @@ static int __command_write_data(struct vub300_mmc_host *vub300,
if (cmd->error) {
data->bytes_xfered = 0;
} else {
- del_timer(&vub300->sg_transfer_timer);
+ timer_delete(&vub300->sg_transfer_timer);
if (vub300->sg_request.status < 0) {
cmd->error = vub300->sg_request.status;
data->bytes_xfered = 0;
@@ -1715,6 +1711,9 @@ static void construct_request_response(struct vub300_mmc_host *vub300,
int bytes = 3 & less_cmd;
int words = less_cmd >> 2;
u8 *r = vub300->resp.response.command_response;
+
+ if (!resp_len)
+ return;
if (bytes == 3) {
cmd->resp[words] = (r[1 + (words << 2)] << 24)
| (r[2 + (words << 2)] << 16)
@@ -1757,7 +1756,7 @@ static void vub300_cmndwork_thread(struct work_struct *work)
* has been already downloaded to the VUB300 chip
*/
} else if (0 == vub300->mmc->card->sdio_funcs) {
- strncpy(vub300->vub_name, "SD memory device",
+ strscpy(vub300->vub_name, "SD memory device",
sizeof(vub300->vub_name));
} else {
download_offload_pseudocode(vub300);
@@ -1991,7 +1990,7 @@ static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8],
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_CLOCK_SPEED,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0x00, 0x00, buf, buf_array_size, HZ);
+ 0x00, 0x00, buf, buf_array_size, 1000);
if (retval != 8) {
dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED"
" %dkHz failed with retval=%d\n", kHzClock, retval);
@@ -2013,14 +2012,14 @@ static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_SD_POWER,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0x0000, 0x0000, NULL, 0, HZ);
+ 0x0000, 0x0000, NULL, 0, 1000);
/* must wait for the VUB300 u-proc to boot up */
msleep(600);
} else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) {
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_SD_POWER,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0x0001, 0x0000, NULL, 0, HZ);
+ 0x0001, 0x0000, NULL, 0, 1000);
msleep(600);
vub300->card_powered = 1;
} else if (ios->power_mode == MMC_POWER_ON) {
@@ -2049,6 +2048,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
return;
kref_get(&vub300->kref);
if (enable) {
+ set_current_state(TASK_RUNNING);
mutex_lock(&vub300->irq_mutex);
if (vub300->irqs_queued) {
vub300->irqs_queued -= 1;
@@ -2064,6 +2064,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
vub300_queue_poll_work(vub300, 0);
}
mutex_unlock(&vub300->irq_mutex);
+ set_current_state(TASK_INTERRUPTIBLE);
} else {
vub300->irq_enabled = 0;
}
@@ -2111,7 +2112,7 @@ static int vub300_probe(struct usb_interface *interface,
goto error1;
}
/* this also allocates memory for our VUB300 mmc host device */
- mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev);
+ mmc = devm_mmc_alloc_host(&udev->dev, sizeof(*vub300));
if (!mmc) {
retval = -ENOMEM;
dev_err(&udev->dev, "not enough memory for the mmc_host\n");
@@ -2268,23 +2269,23 @@ static int vub300_probe(struct usb_interface *interface,
dev_err(&vub300->udev->dev,
"Could not find two sets of bulk-in/out endpoint pairs\n");
retval = -EINVAL;
- goto error5;
+ goto error4;
}
retval =
usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
GET_HC_INF0,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, 0x0000, &vub300->hc_info,
- sizeof(vub300->hc_info), HZ);
+ sizeof(vub300->hc_info), 1000);
if (retval < 0)
- goto error5;
+ goto error4;
retval =
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_ROM_WAIT_STATES,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- firmware_rom_wait_states, 0x0000, NULL, 0, HZ);
+ firmware_rom_wait_states, 0x0000, NULL, 0, 1000);
if (retval < 0)
- goto error5;
+ goto error4;
dev_info(&vub300->udev->dev,
"operating_mode = %s %s %d MHz %s %d byte USB packets\n",
(mmc->caps & MMC_CAP_SDIO_IRQ) ? "IRQs" : "POLL",
@@ -2297,7 +2298,7 @@ static int vub300_probe(struct usb_interface *interface,
GET_SYSTEM_PORT_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, 0x0000, &vub300->system_port_status,
- sizeof(vub300->system_port_status), HZ);
+ sizeof(vub300->system_port_status), 1000);
if (retval < 0) {
goto error4;
} else if (sizeof(vub300->system_port_status) == retval) {
@@ -2306,6 +2307,7 @@ static int vub300_probe(struct usb_interface *interface,
vub300->read_only =
(0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
} else {
+ retval = -EINVAL;
goto error4;
}
usb_set_intfdata(interface, vub300);
@@ -2329,10 +2331,13 @@ static int vub300_probe(struct usb_interface *interface,
"USB vub300 remote SDIO host controller[%d]"
"connected with no SD/SDIO card inserted\n",
interface_to_InterfaceNumber(interface));
- mmc_add_host(mmc);
+ retval = mmc_add_host(mmc);
+ if (retval)
+ goto error6;
+
return 0;
-error5:
- mmc_free_host(mmc);
+error6:
+ timer_delete_sync(&vub300->inactivity_timer);
/*
* and hence also frees vub300
* which is contained at the end of struct mmc
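
[Editor's note, not part of the patch above: the vub300 hunks replace the literal HZ with 1000 because the last argument of usb_control_msg() is a timeout in milliseconds, so HZ (ticks per second, e.g. 100 or 250) only happens to mean "one second" when the kernel is built with HZ=1000; they also switch strncpy() to strscpy(), which always NUL-terminates. A minimal, hypothetical sketch of the resulting pattern follows; the MY_VENDOR_REQUEST value and send_vendor_cmd() name are invented for illustration.]

#include <linux/usb.h>
#include <linux/string.h>

#define MY_VENDOR_REQUEST	0x42	/* hypothetical vendor request */

static int send_vendor_cmd(struct usb_device *udev, void *buf, u16 len,
			   char *name, size_t name_sz)
{
	int ret;

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      MY_VENDOR_REQUEST,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0x0000, 0x0000, buf, len,
			      1000 /* milliseconds, not jiffies */);
	if (ret < 0)
		/* strscpy() guarantees NUL termination, unlike strncpy() */
		strscpy(name, "vendor command failed", name_sz);

	return ret;
}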
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 67ecd342fe5f..c33a0223ce7f 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -267,7 +267,7 @@ static inline int wbsd_next_sg(struct wbsd_host *host)
static inline char *wbsd_map_sg(struct wbsd_host *host)
{
- return kmap_atomic(sg_page(host->cur_sg)) + host->cur_sg->offset;
+ return kmap_local_page(sg_page(host->cur_sg)) + host->cur_sg->offset;
}
static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
@@ -439,7 +439,7 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
* End of scatter list entry?
*/
if (host->remain == 0) {
- kunmap_atomic(buffer);
+ kunmap_local(buffer);
/*
* Get next entry. Check if last.
*/
@@ -451,7 +451,7 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
}
}
}
- kunmap_atomic(buffer);
+ kunmap_local(buffer);
/*
* This is a very dirty hack to solve a
@@ -459,7 +459,7 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
* FIFO threshold interrupts properly.
*/
if ((data->blocks * data->blksz - data->bytes_xfered) < 16)
- tasklet_schedule(&host->fifo_tasklet);
+ queue_work(system_bh_wq, &host->fifo_bh_work);
}
static void wbsd_fill_fifo(struct wbsd_host *host)
@@ -505,7 +505,7 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
* End of scatter list entry?
*/
if (host->remain == 0) {
- kunmap_atomic(buffer);
+ kunmap_local(buffer);
/*
* Get next entry. Check if last.
*/
@@ -517,14 +517,14 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
}
}
}
- kunmap_atomic(buffer);
+ kunmap_local(buffer);
/*
* The controller stops sending interrupts for
* 'FIFO empty' under certain conditions. So we
* need to be a bit more pro-active.
*/
- tasklet_schedule(&host->fifo_tasklet);
+ queue_work(system_bh_wq, &host->fifo_bh_work);
}
static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
@@ -746,7 +746,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct mmc_command *cmd;
/*
- * Disable tasklets to avoid a deadlock.
+ * Disable bh works to avoid a deadlock.
*/
spin_lock_bh(&host->lock);
@@ -821,7 +821,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
* Dirty fix for hardware bug.
*/
if (host->dma == -1)
- tasklet_schedule(&host->fifo_tasklet);
+ queue_work(system_bh_wq, &host->fifo_bh_work);
spin_unlock_bh(&host->lock);
@@ -947,7 +947,7 @@ static const struct mmc_host_ops wbsd_ops = {
static void wbsd_reset_ignore(struct timer_list *t)
{
- struct wbsd_host *host = from_timer(host, t, ignore_timer);
+ struct wbsd_host *host = timer_container_of(host, t, ignore_timer);
BUG_ON(host == NULL);
@@ -961,13 +961,13 @@ static void wbsd_reset_ignore(struct timer_list *t)
* Card status might have changed during the
* blackout.
*/
- tasklet_schedule(&host->card_tasklet);
+ queue_work(system_bh_wq, &host->card_bh_work);
spin_unlock_bh(&host->lock);
}
/*
- * Tasklets
+ * BH Works
*/
static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host)
@@ -987,9 +987,9 @@ static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host)
return host->mrq->cmd->data;
}
-static void wbsd_tasklet_card(struct tasklet_struct *t)
+static void wbsd_card_bh_work(struct work_struct *t)
{
- struct wbsd_host *host = from_tasklet(host, t, card_tasklet);
+ struct wbsd_host *host = from_work(host, t, card_bh_work);
u8 csr;
int delay = -1;
@@ -1020,7 +1020,7 @@ static void wbsd_tasklet_card(struct tasklet_struct *t)
wbsd_reset(host);
host->mrq->cmd->error = -ENOMEDIUM;
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
}
delay = 0;
@@ -1036,9 +1036,9 @@ static void wbsd_tasklet_card(struct tasklet_struct *t)
mmc_detect_change(host->mmc, msecs_to_jiffies(delay));
}
-static void wbsd_tasklet_fifo(struct tasklet_struct *t)
+static void wbsd_fifo_bh_work(struct work_struct *t)
{
- struct wbsd_host *host = from_tasklet(host, t, fifo_tasklet);
+ struct wbsd_host *host = from_work(host, t, fifo_bh_work);
struct mmc_data *data;
spin_lock(&host->lock);
@@ -1060,16 +1060,16 @@ static void wbsd_tasklet_fifo(struct tasklet_struct *t)
*/
if (host->num_sg == 0) {
wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
}
end:
spin_unlock(&host->lock);
}
-static void wbsd_tasklet_crc(struct tasklet_struct *t)
+static void wbsd_crc_bh_work(struct work_struct *t)
{
- struct wbsd_host *host = from_tasklet(host, t, crc_tasklet);
+ struct wbsd_host *host = from_work(host, t, crc_bh_work);
struct mmc_data *data;
spin_lock(&host->lock);
@@ -1085,15 +1085,15 @@ static void wbsd_tasklet_crc(struct tasklet_struct *t)
data->error = -EILSEQ;
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
end:
spin_unlock(&host->lock);
}
-static void wbsd_tasklet_timeout(struct tasklet_struct *t)
+static void wbsd_timeout_bh_work(struct work_struct *t)
{
- struct wbsd_host *host = from_tasklet(host, t, timeout_tasklet);
+ struct wbsd_host *host = from_work(host, t, timeout_bh_work);
struct mmc_data *data;
spin_lock(&host->lock);
@@ -1109,15 +1109,15 @@ static void wbsd_tasklet_timeout(struct tasklet_struct *t)
data->error = -ETIMEDOUT;
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
end:
spin_unlock(&host->lock);
}
-static void wbsd_tasklet_finish(struct tasklet_struct *t)
+static void wbsd_finish_bh_work(struct work_struct *t)
{
- struct wbsd_host *host = from_tasklet(host, t, finish_tasklet);
+ struct wbsd_host *host = from_work(host, t, finish_bh_work);
struct mmc_data *data;
spin_lock(&host->lock);
@@ -1156,18 +1156,18 @@ static irqreturn_t wbsd_irq(int irq, void *dev_id)
host->isr |= isr;
/*
- * Schedule tasklets as needed.
+ * Schedule bh work as needed.
*/
if (isr & WBSD_INT_CARD)
- tasklet_schedule(&host->card_tasklet);
+ queue_work(system_bh_wq, &host->card_bh_work);
if (isr & WBSD_INT_FIFO_THRE)
- tasklet_schedule(&host->fifo_tasklet);
+ queue_work(system_bh_wq, &host->fifo_bh_work);
if (isr & WBSD_INT_CRC)
- tasklet_hi_schedule(&host->crc_tasklet);
+ queue_work(system_bh_highpri_wq, &host->crc_bh_work);
if (isr & WBSD_INT_TIMEOUT)
- tasklet_hi_schedule(&host->timeout_tasklet);
+ queue_work(system_bh_highpri_wq, &host->timeout_bh_work);
if (isr & WBSD_INT_TC)
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
return IRQ_HANDLED;
}
@@ -1190,7 +1190,7 @@ static int wbsd_alloc_mmc(struct device *dev)
/*
* Allocate MMC structure.
*/
- mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
+ mmc = devm_mmc_alloc_host(dev, sizeof(*host));
if (!mmc)
return -ENOMEM;
@@ -1261,11 +1261,7 @@ static void wbsd_free_mmc(struct device *dev)
host = mmc_priv(mmc);
BUG_ON(host == NULL);
- del_timer_sync(&host->ignore_timer);
-
- mmc_free_host(mmc);
-
- dev_set_drvdata(dev, NULL);
+ timer_delete_sync(&host->ignore_timer);
}
/*
@@ -1286,8 +1282,6 @@ static int wbsd_scan(struct wbsd_host *host)
continue;
for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) {
- id = 0xFFFF;
-
host->config = config_ports[i];
host->unlock_code = unlock_codes[j];
@@ -1447,13 +1441,13 @@ static int wbsd_request_irq(struct wbsd_host *host, int irq)
int ret;
/*
- * Set up tasklets. Must be done before requesting interrupt.
+ * Set up bh works. Must be done before requesting interrupt.
*/
- tasklet_setup(&host->card_tasklet, wbsd_tasklet_card);
- tasklet_setup(&host->fifo_tasklet, wbsd_tasklet_fifo);
- tasklet_setup(&host->crc_tasklet, wbsd_tasklet_crc);
- tasklet_setup(&host->timeout_tasklet, wbsd_tasklet_timeout);
- tasklet_setup(&host->finish_tasklet, wbsd_tasklet_finish);
+ INIT_WORK(&host->card_bh_work, wbsd_card_bh_work);
+ INIT_WORK(&host->fifo_bh_work, wbsd_fifo_bh_work);
+ INIT_WORK(&host->crc_bh_work, wbsd_crc_bh_work);
+ INIT_WORK(&host->timeout_bh_work, wbsd_timeout_bh_work);
+ INIT_WORK(&host->finish_bh_work, wbsd_finish_bh_work);
/*
* Allocate interrupt.
@@ -1476,11 +1470,11 @@ static void wbsd_release_irq(struct wbsd_host *host)
host->irq = 0;
- tasklet_kill(&host->card_tasklet);
- tasklet_kill(&host->fifo_tasklet);
- tasklet_kill(&host->crc_tasklet);
- tasklet_kill(&host->timeout_tasklet);
- tasklet_kill(&host->finish_tasklet);
+ cancel_work_sync(&host->card_bh_work);
+ cancel_work_sync(&host->fifo_bh_work);
+ cancel_work_sync(&host->crc_bh_work);
+ cancel_work_sync(&host->timeout_bh_work);
+ cancel_work_sync(&host->finish_bh_work);
}
/*
@@ -1698,7 +1692,15 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma,
*/
wbsd_init_device(host);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret) {
+ if (!pnp)
+ wbsd_chip_poweroff(host);
+
+ wbsd_release_resources(host);
+ wbsd_free_mmc(dev);
+ return ret;
+ }
pr_info("%s: W83L51xD", mmc_hostname(mmc));
if (host->chip_id != 0)
@@ -1748,11 +1750,9 @@ static int wbsd_probe(struct platform_device *dev)
return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0);
}
-static int wbsd_remove(struct platform_device *dev)
+static void wbsd_remove(struct platform_device *dev)
{
wbsd_shutdown(&dev->dev, 0);
-
- return 0;
}
/*
@@ -1895,7 +1895,6 @@ static struct platform_device *wbsd_device;
static struct platform_driver wbsd_driver = {
.probe = wbsd_probe,
.remove = wbsd_remove,
-
.suspend = wbsd_platform_suspend,
.resume = wbsd_platform_resume,
.driver = {
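
[Editor's note, not part of the patch above: both the vub300 and wbsd hunks convert mmc_alloc_host()/mmc_free_host() to devm_mmc_alloc_host(), so the host structure is freed automatically on probe failure and on device removal, and the error labels that existed only to reach mmc_free_host() disappear. A minimal sketch under that assumption; the my_priv/my_probe names are hypothetical.]

#include <linux/mmc/host.h>
#include <linux/platform_device.h>

struct my_priv {
	struct mmc_host *mmc;
};

static int my_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct my_priv *priv;
	int ret;

	/* device-managed: released automatically, no mmc_free_host() */
	mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*priv));
	if (!mmc)
		return -ENOMEM;

	priv = mmc_priv(mmc);
	priv->mmc = mmc;
	platform_set_drvdata(pdev, mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		return ret;	/* host memory is cleaned up by devres */

	return 0;
}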
diff --git a/drivers/mmc/host/wbsd.h b/drivers/mmc/host/wbsd.h
index be30b4d8ce4c..970886831305 100644
--- a/drivers/mmc/host/wbsd.h
+++ b/drivers/mmc/host/wbsd.h
@@ -171,11 +171,11 @@ struct wbsd_host
int irq; /* Interrupt */
int dma; /* DMA channel */
- struct tasklet_struct card_tasklet; /* Tasklet structures */
- struct tasklet_struct fifo_tasklet;
- struct tasklet_struct crc_tasklet;
- struct tasklet_struct timeout_tasklet;
- struct tasklet_struct finish_tasklet;
+ struct work_struct card_bh_work; /* Work structures */
+ struct work_struct fifo_bh_work;
+ struct work_struct crc_bh_work;
+ struct work_struct timeout_bh_work;
+ struct work_struct finish_bh_work;
struct timer_list ignore_timer; /* Ignore detection timer */
};
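
[Editor's note, not part of the patch above: the wbsd hunks replace every tasklet with a work item queued on the BH workqueues, so the handlers still run in softirq (BH) context and keep their spin_lock() usage, while setup and teardown move to the generic workqueue API. A minimal sketch of that conversion, assuming a kernel recent enough to provide system_bh_wq and from_work(); the my_host/my_irq_handler names are invented.]

#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct my_host {
	spinlock_t lock;
	struct work_struct finish_bh_work;	/* was: struct tasklet_struct */
};

static void my_finish_bh_work(struct work_struct *t)
{
	/* was: from_tasklet(host, t, finish_tasklet) */
	struct my_host *host = from_work(host, t, finish_bh_work);

	spin_lock(&host->lock);
	/* complete the current request here */
	spin_unlock(&host->lock);
}

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_host *host = dev_id;

	/* was: tasklet_schedule(&host->finish_tasklet) */
	queue_work(system_bh_wq, &host->finish_bh_work);
	return IRQ_HANDLED;
}

static void my_host_init(struct my_host *host)
{
	spin_lock_init(&host->lock);
	/* was: tasklet_setup(); must still be done before requesting the IRQ */
	INIT_WORK(&host->finish_bh_work, my_finish_bh_work);
}

static void my_host_teardown(struct my_host *host)
{
	/* was: tasklet_kill() */
	cancel_work_sync(&host->finish_bh_work);
}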
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index cf10949fb0ac..1b1d691e19fc 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -21,7 +21,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_device.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
@@ -751,19 +750,16 @@ static int wmt_mci_probe(struct platform_device *pdev)
struct mmc_host *mmc;
struct wmt_mci_priv *priv;
struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *of_id =
- of_match_device(wmt_mci_dt_ids, &pdev->dev);
const struct wmt_mci_caps *wmt_caps;
int ret;
int regular_irq, dma_irq;
- if (!of_id || !of_id->data) {
+ wmt_caps = of_device_get_match_data(&pdev->dev);
+ if (!wmt_caps) {
dev_err(&pdev->dev, "Controller capabilities data missing\n");
return -EFAULT;
}
- wmt_caps = of_id->data;
-
if (!np) {
dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n");
return -EFAULT;
@@ -778,7 +774,7 @@ static int wmt_mci_probe(struct platform_device *pdev)
goto fail1;
}
- mmc = mmc_alloc_host(sizeof(struct wmt_mci_priv), &pdev->dev);
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*priv));
if (!mmc) {
dev_err(&pdev->dev, "Failed to allocate mmc_host\n");
ret = -ENOMEM;
@@ -805,16 +801,14 @@ static int wmt_mci_probe(struct platform_device *pdev)
priv->power_inverted = 0;
priv->cd_inverted = 0;
- if (of_get_property(np, "sdon-inverted", NULL))
- priv->power_inverted = 1;
- if (of_get_property(np, "cd-inverted", NULL))
- priv->cd_inverted = 1;
+ priv->power_inverted = of_property_read_bool(np, "sdon-inverted");
+ priv->cd_inverted = of_property_read_bool(np, "cd-inverted");
priv->sdmmc_base = of_iomap(np, 0);
if (!priv->sdmmc_base) {
dev_err(&pdev->dev, "Failed to map IO space\n");
ret = -ENOMEM;
- goto fail2;
+ goto fail1;
}
priv->irq_regular = regular_irq;
@@ -849,7 +843,7 @@ static int wmt_mci_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk_sdmmc)) {
dev_err(&pdev->dev, "Error getting clock\n");
ret = PTR_ERR(priv->clk_sdmmc);
- goto fail5;
+ goto fail5_and_a_half;
}
ret = clk_prepare_enable(priv->clk_sdmmc);
@@ -859,30 +853,34 @@ static int wmt_mci_probe(struct platform_device *pdev)
/* configure the controller to a known 'ready' state */
wmt_reset_hardware(mmc);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto fail7;
dev_info(&pdev->dev, "WMT SDHC Controller initialized\n");
return 0;
+fail7:
+ clk_disable_unprepare(priv->clk_sdmmc);
fail6:
clk_put(priv->clk_sdmmc);
+fail5_and_a_half:
+ dma_free_coherent(&pdev->dev, mmc->max_blk_count * 16,
+ priv->dma_desc_buffer, priv->dma_desc_device_addr);
fail5:
free_irq(dma_irq, priv);
fail4:
free_irq(regular_irq, priv);
fail3:
iounmap(priv->sdmmc_base);
-fail2:
- mmc_free_host(mmc);
fail1:
return ret;
}
-static int wmt_mci_remove(struct platform_device *pdev)
+static void wmt_mci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct wmt_mci_priv *priv;
- struct resource *res;
u32 reg_tmp;
mmc = platform_get_drvdata(pdev);
@@ -910,17 +908,9 @@ static int wmt_mci_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk_sdmmc);
clk_put(priv->clk_sdmmc);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- mmc_free_host(mmc);
-
dev_info(&pdev->dev, "WMT MCI device removed\n");
-
- return 0;
}
-#ifdef CONFIG_PM
static int wmt_mci_suspend(struct device *dev)
{
u32 reg_tmp;
@@ -972,18 +962,7 @@ static int wmt_mci_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops wmt_mci_pm = {
- .suspend = wmt_mci_suspend,
- .resume = wmt_mci_resume,
-};
-
-#define wmt_mci_pm_ops (&wmt_mci_pm)
-
-#else /* !CONFIG_PM */
-
-#define wmt_mci_pm_ops NULL
-
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(wmt_mci_pm_ops, wmt_mci_suspend, wmt_mci_resume);
static struct platform_driver wmt_mci_driver = {
.probe = wmt_mci_probe,
@@ -991,7 +970,7 @@ static struct platform_driver wmt_mci_driver = {
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = wmt_mci_pm_ops,
+ .pm = pm_sleep_ptr(&wmt_mci_pm_ops),
.of_match_table = wmt_mci_dt_ids,
},
};
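
[Editor's note, not part of the patch above: the wmt-sdmmc hunk drops the hand-rolled #ifdef CONFIG_PM block in favour of DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr(), which keeps the suspend/resume callbacks visible to the compiler and resolves the .pm pointer to NULL when CONFIG_PM_SLEEP is disabled. A minimal sketch of that pattern with invented my_mci names.]

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int my_mci_suspend(struct device *dev)
{
	/* quiesce the controller */
	return 0;
}

static int my_mci_resume(struct device *dev)
{
	/* restore controller state */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(my_mci_pm_ops, my_mci_suspend, my_mci_resume);

static int my_mci_probe(struct platform_device *pdev)
{
	return 0;
}

static void my_mci_remove(struct platform_device *pdev)
{
}

static struct platform_driver my_mci_driver = {
	.probe = my_mci_probe,
	.remove = my_mci_remove,
	.driver = {
		.name = "my-mci",
		/* NULL when CONFIG_PM_SLEEP is disabled, no #ifdef needed */
		.pm = pm_sleep_ptr(&my_mci_pm_ops),
	},
};
module_platform_driver(my_mci_driver);

MODULE_LICENSE("GPL");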