Diffstat (limited to 'drivers/mmc/host/dw_mmc.c')
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 196
1 file changed, 146 insertions(+), 50 deletions(-)
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 829af2c98a44..9e74b675e92d 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -35,7 +35,6 @@
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>
#include "dw_mmc.h"
@@ -176,12 +175,12 @@ static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
if (!root)
return;
- debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
- debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
- debugfs_create_u32("state", S_IRUSR, root, &host->state);
- debugfs_create_xul("pending_events", S_IRUSR, root,
+ debugfs_create_file("regs", 0400, root, host, &dw_mci_regs_fops);
+ debugfs_create_file("req", 0400, root, slot, &dw_mci_req_fops);
+ debugfs_create_u32("state", 0400, root, &host->state);
+ debugfs_create_xul("pending_events", 0400, root,
&host->pending_events);
- debugfs_create_xul("completed_events", S_IRUSR, root,
+ debugfs_create_xul("completed_events", 0400, root,
&host->completed_events);
#ifdef CONFIG_FAULT_INJECTION
fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc);
@@ -494,7 +493,7 @@ static void dw_mci_dmac_complete_dma(void *arg)
*/
if (data) {
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
}
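
For context, a minimal sketch of the tasklet-to-BH-workqueue pattern this diff applies throughout the driver, assuming the from_work() helper and the system_bh_wq queue that come with BH workqueues; the my_host/my_bh_work names are purely illustrative, not taken from the driver:

#include <linux/bitops.h>
#include <linux/workqueue.h>

struct my_host {
	struct work_struct bh_work;		/* replaces struct tasklet_struct */
	unsigned long pending_events;
};

static void my_bh_work(struct work_struct *t)
{
	struct my_host *host = from_work(host, t, bh_work);

	/* bottom-half processing; BH work items still run in softirq context */
	clear_bit(0, &host->pending_events);
}

static void my_host_init(struct my_host *host)
{
	INIT_WORK(&host->bh_work, my_bh_work);		/* was tasklet_setup() */
}

static void my_host_kick(struct my_host *host)
{
	set_bit(0, &host->pending_events);
	queue_work(system_bh_wq, &host->bh_work);	/* was tasklet_schedule() */
}
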
@@ -1183,7 +1182,7 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
/*
* Use the initial fifoth_val for PIO mode. If wm_algined
* is set, we set watermark same as data size.
- * If next issued data may be transfered by DMA mode,
+ * If next issued data may be transferred by DMA mode,
* prev_blksz should be invalidated.
*/
if (host->wm_aligned)
@@ -1618,6 +1617,7 @@ static void dw_mci_hw_reset(struct mmc_host *mmc)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
int reset;
if (host->use_dma == TRANS_MODE_IDMAC)
@@ -1627,6 +1627,11 @@ static void dw_mci_hw_reset(struct mmc_host *mmc)
SDMMC_CTRL_FIFO_RESET))
return;
+ if (drv_data && drv_data->hw_reset) {
+ drv_data->hw_reset(host);
+ return;
+ }
+
/*
* According to eMMC spec, card reset procedure:
* tRstW >= 1us: RST_n pulse width
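
The new branch above implies that struct dw_mci_drv_data grows a hw_reset callback so platform glue can override the generic RST_n toggle. A hedged sketch of how a glue driver might wire it up; the foo_* names and the body are hypothetical:

/* hypothetical platform glue, for illustration only */
static void foo_dw_mci_hw_reset(struct dw_mci *host)
{
	/* platform-specific card reset sequence goes here */
}

static const struct dw_mci_drv_data foo_drv_data = {
	.hw_reset	= foo_dw_mci_hw_reset,
};
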
@@ -1835,7 +1840,7 @@ static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
if (!host->data_status) {
host->data_status = SDMMC_INT_DCRC;
set_bit(EVENT_DATA_ERROR, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
spin_unlock_irqrestore(&host->irq_lock, flags);
@@ -1870,8 +1875,7 @@ static void dw_mci_init_fault(struct dw_mci *host)
{
host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;
- hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- host->fault_timer.function = dw_mci_fault_timer;
+ hrtimer_setup(&host->fault_timer, dw_mci_fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
#else
static void dw_mci_init_fault(struct dw_mci *host)
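
A small sketch of the hrtimer_init()-plus-function-pointer to hrtimer_setup() conversion seen above; the my_* names are illustrative:

#include <linux/hrtimer.h>

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void my_timer_init(struct hrtimer *timer)
{
	/*
	 * Old pattern:
	 *	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *	timer->function = my_timer_fn;
	 */
	hrtimer_setup(timer, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
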
@@ -2036,10 +2040,10 @@ static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
* Really be certain that the timer has stopped. This is a bit of
* paranoia and could only really happen if we had really bad
* interrupt latency and the interrupt routine and timeout were
- * running concurrently so that the del_timer() in the interrupt
+ * running concurrently so that the timer_delete() in the interrupt
* handler couldn't run.
*/
- WARN_ON(del_timer_sync(&host->cto_timer));
+ WARN_ON(timer_delete_sync(&host->cto_timer));
clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
return true;
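
The WARN_ON() above keeps the old paranoia check, only spelled with the renamed API. A minimal sketch of del_timer_sync() vs. timer_delete_sync(), with a hypothetical helper name:

#include <linux/bug.h>
#include <linux/timer.h>

static void my_stop_timer(struct timer_list *timer)
{
	/*
	 * was: del_timer_sync(timer); same semantics, new name.
	 * The return value is non-zero if the timer was still pending,
	 * which is exactly what the WARN_ON() above checks for.
	 */
	WARN_ON(timer_delete_sync(timer));
}
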
@@ -2051,15 +2055,15 @@ static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
return false;
/* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
- WARN_ON(del_timer_sync(&host->dto_timer));
+ WARN_ON(timer_delete_sync(&host->dto_timer));
clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
return true;
}
-static void dw_mci_tasklet_func(struct tasklet_struct *t)
+static void dw_mci_work_func(struct work_struct *t)
{
- struct dw_mci *host = from_tasklet(host, t, tasklet);
+ struct dw_mci *host = from_work(host, t, bh_work);
struct mmc_data *data;
struct mmc_command *cmd;
struct mmc_request *mrq;
@@ -2114,7 +2118,7 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
* will waste a bit of time (we already know
* the command was bad), it can't cause any
* errors since it's possible it would have
- * taken place anyway if this tasklet got
+ * taken place anyway if this bh work got
* delayed. Allowing the transfer to take place
* avoids races and keeps things simple.
*/
@@ -2574,6 +2578,91 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
}
}
+static void dw_mci_push_data64_32(struct dw_mci *host, void *buf, int cnt)
+{
+ struct mmc_data *data = host->data;
+ int init_cnt = cnt;
+
+ /* try and push anything in the part_buf */
+ if (unlikely(host->part_buf_count)) {
+ int len = dw_mci_push_part_bytes(host, buf, cnt);
+
+ buf += len;
+ cnt -= len;
+
+ if (host->part_buf_count == 8) {
+ mci_fifo_l_writeq(host->fifo_reg, host->part_buf);
+ host->part_buf_count = 0;
+ }
+ }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x7)) {
+ while (cnt >= 8) {
+ u64 aligned_buf[16];
+ int len = min(cnt & -8, (int)sizeof(aligned_buf));
+ int items = len >> 3;
+ int i;
+ /* memcpy from input buffer into aligned buffer */
+ memcpy(aligned_buf, buf, len);
+ buf += len;
+ cnt -= len;
+ /* push data from aligned buffer into fifo */
+ for (i = 0; i < items; ++i)
+ mci_fifo_l_writeq(host->fifo_reg, aligned_buf[i]);
+ }
+ } else
+#endif
+ {
+ u64 *pdata = buf;
+
+ for (; cnt >= 8; cnt -= 8)
+ mci_fifo_l_writeq(host->fifo_reg, *pdata++);
+ buf = pdata;
+ }
+ /* put anything remaining in the part_buf */
+ if (cnt) {
+ dw_mci_set_part_bytes(host, buf, cnt);
+ /* Push data if we have reached the expected data length */
+ if ((data->bytes_xfered + init_cnt) ==
+ (data->blksz * data->blocks))
+ mci_fifo_l_writeq(host->fifo_reg, host->part_buf);
+ }
+}
+
+static void dw_mci_pull_data64_32(struct dw_mci *host, void *buf, int cnt)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x7)) {
+ while (cnt >= 8) {
+ /* pull data from fifo into aligned buffer */
+ u64 aligned_buf[16];
+ int len = min(cnt & -8, (int)sizeof(aligned_buf));
+ int items = len >> 3;
+ int i;
+
+ for (i = 0; i < items; ++i)
+ aligned_buf[i] = mci_fifo_l_readq(host->fifo_reg);
+
+ /* memcpy from aligned buffer into output buffer */
+ memcpy(buf, aligned_buf, len);
+ buf += len;
+ cnt -= len;
+ }
+ } else
+#endif
+ {
+ u64 *pdata = buf;
+
+ for (; cnt >= 8; cnt -= 8)
+ *pdata++ = mci_fifo_l_readq(host->fifo_reg);
+ buf = pdata;
+ }
+ if (cnt) {
+ host->part_buf = mci_fifo_l_readq(host->fifo_reg);
+ dw_mci_pull_final_bytes(host, buf, cnt);
+ }
+}
+
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
int len;
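
The new dw_mci_push_data64_32()/dw_mci_pull_data64_32() above serve the DW_MMC_QUIRK_FIFO64_32 case, where a 64-bit wide FIFO has to be touched through 32-bit register accesses. The driver's real mci_fifo_l_readq()/mci_fifo_l_writeq() helpers live in dw_mmc.h; the sketch below only illustrates one plausible shape for such a split accessor, and the offsets and ordering here are assumptions, not the driver's actual definition:

#include <linux/io.h>

static inline u64 example_fifo_readq(void __iomem *addr)
{
	u64 lo = readl(addr);		/* lower 32 bits first */
	u64 hi = readl(addr + 4);	/* then the upper 32 bits */

	return lo | (hi << 32);
}

static inline void example_fifo_writeq(void __iomem *addr, u64 val)
{
	writel(lower_32_bits(val), addr);
	writel(upper_32_bits(val), addr + 4);
}
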
@@ -2699,7 +2788,7 @@ done:
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
- del_timer(&host->cto_timer);
+ timer_delete(&host->cto_timer);
if (!host->cmd_status)
host->cmd_status = status;
@@ -2707,7 +2796,7 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
smp_wmb(); /* drain writebuffer */
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
dw_mci_start_fault_timer(host);
}
@@ -2743,13 +2832,13 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
dw_mci_cmd_interrupt(host, pending);
spin_unlock(&host->irq_lock);
- del_timer(&host->cmd11_timer);
+ timer_delete(&host->cmd11_timer);
}
if (pending & DW_MCI_CMD_ERROR_FLAGS) {
spin_lock(&host->irq_lock);
- del_timer(&host->cto_timer);
+ timer_delete(&host->cto_timer);
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
host->cmd_status = pending;
smp_wmb(); /* drain writebuffer */
@@ -2762,7 +2851,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
spin_lock(&host->irq_lock);
if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
- del_timer(&host->dto_timer);
+ timer_delete(&host->dto_timer);
/* if there is an error report DATA_ERROR */
mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
@@ -2775,7 +2864,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
set_bit(EVENT_DATA_COMPLETE,
&host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
spin_unlock(&host->irq_lock);
}
@@ -2783,7 +2872,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & SDMMC_INT_DATA_OVER) {
spin_lock(&host->irq_lock);
- del_timer(&host->dto_timer);
+ timer_delete(&host->dto_timer);
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
if (!host->data_status)
@@ -2794,7 +2883,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
dw_mci_read_data_pio(host, true);
}
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
spin_unlock(&host->irq_lock);
}
@@ -2919,7 +3008,7 @@ static int dw_mci_init_slot(struct dw_mci *host)
struct dw_mci_slot *slot;
int ret;
- mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
+ mmc = devm_mmc_alloc_host(host->dev, sizeof(*slot));
if (!mmc)
return -ENOMEM;
@@ -2935,18 +3024,18 @@ static int dw_mci_init_slot(struct dw_mci *host)
/*if there are external regulators, get them*/
ret = mmc_regulator_get_supply(mmc);
if (ret)
- goto err_host_allocated;
+ return ret;
if (!mmc->ocr_avail)
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
ret = mmc_of_parse(mmc);
if (ret)
- goto err_host_allocated;
+ return ret;
ret = dw_mci_init_slot_caps(slot);
if (ret)
- goto err_host_allocated;
+ return ret;
/* Useful defaults if platform data is unset. */
if (host->use_dma == TRANS_MODE_IDMAC) {
@@ -2976,17 +3065,13 @@ static int dw_mci_init_slot(struct dw_mci *host)
ret = mmc_add_host(mmc);
if (ret)
- goto err_host_allocated;
+ return ret;
#if defined(CONFIG_DEBUG_FS)
dw_mci_init_debugfs(slot);
#endif
return 0;
-
-err_host_allocated:
- mmc_free_host(mmc);
- return ret;
}
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
@@ -2994,7 +3079,6 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
/* Debugfs stuff is cleaned up by mmc core */
mmc_remove_host(slot->mmc);
slot->host->slot = NULL;
- mmc_free_host(slot->mmc);
}
static void dw_mci_init_dma(struct dw_mci *host)
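
With devm_mmc_alloc_host() the mmc_host is device-managed, which is why the err_host_allocated label and both mmc_free_host() calls can go away. A compact sketch of the resulting error handling; the example_* name is illustrative:

#include <linux/mmc/host.h>
#include "dw_mmc.h"

static int example_init_slot(struct dw_mci *host)
{
	struct mmc_host *mmc;

	mmc = devm_mmc_alloc_host(host->dev, sizeof(struct dw_mci_slot));
	if (!mmc)
		return -ENOMEM;

	/* any later failure can simply return: the host is freed automatically */
	return 0;
}
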
@@ -3036,9 +3120,8 @@ static void dw_mci_init_dma(struct dw_mci *host)
host->dma_64bit_address = 1;
dev_info(host->dev,
"IDMAC supports 64-bit address mode.\n");
- if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
- dma_set_coherent_mask(host->dev,
- DMA_BIT_MASK(64));
+ if (dma_set_mask_and_coherent(host->dev, DMA_BIT_MASK(64)))
+ dev_info(host->dev, "Fail to set 64-bit DMA mask");
} else {
/* host supports IDMAC in 32-bit address mode */
host->dma_64bit_address = 0;
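
The two-step dma_set_mask() plus dma_set_coherent_mask() sequence is collapsed into dma_set_mask_and_coherent(), which sets both the streaming and coherent masks in one call. A minimal sketch, with dev as a placeholder:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static void example_set_dma_mask(struct device *dev)
{
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dev_info(dev, "64-bit DMA mask rejected, staying with defaults\n");
}
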
@@ -3090,7 +3173,7 @@ no_dma:
static void dw_mci_cmd11_timer(struct timer_list *t)
{
- struct dw_mci *host = from_timer(host, t, cmd11_timer);
+ struct dw_mci *host = timer_container_of(host, t, cmd11_timer);
if (host->state != STATE_SENDING_CMD11) {
dev_warn(host->dev, "Unexpected CMD11 timeout\n");
@@ -3099,12 +3182,12 @@ static void dw_mci_cmd11_timer(struct timer_list *t)
host->cmd_status = SDMMC_INT_RTO;
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
static void dw_mci_cto_timer(struct timer_list *t)
{
- struct dw_mci *host = from_timer(host, t, cto_timer);
+ struct dw_mci *host = timer_container_of(host, t, cto_timer);
unsigned long irqflags;
u32 pending;
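
In current kernels from_timer() is spelled timer_container_of() and del_timer() is timer_delete(); a small sketch of the pattern with hypothetical names:

#include <linux/timer.h>

struct my_timer_host {
	struct timer_list cto_timer;
};

static void my_timeout(struct timer_list *t)
{
	struct my_timer_host *host = timer_container_of(host, t, cto_timer);

	/* handle the timeout for host */
}

static void my_cancel(struct my_timer_host *host)
{
	timer_delete(&host->cto_timer);		/* was del_timer() */
}
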
@@ -3145,7 +3228,7 @@ static void dw_mci_cto_timer(struct timer_list *t)
*/
host->cmd_status = SDMMC_INT_RTO;
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
break;
default:
dev_warn(host->dev, "Unexpected command timeout, state %d\n",
@@ -3159,7 +3242,7 @@ exit:
static void dw_mci_dto_timer(struct timer_list *t)
{
- struct dw_mci *host = from_timer(host, t, dto_timer);
+ struct dw_mci *host = timer_container_of(host, t, dto_timer);
unsigned long irqflags;
u32 pending;
@@ -3196,7 +3279,7 @@ static void dw_mci_dto_timer(struct timer_list *t)
host->data_status = SDMMC_INT_DRTO;
set_bit(EVENT_DATA_ERROR, &host->pending_events);
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
break;
default:
dev_warn(host->dev, "Unexpected data timeout, state %d\n",
@@ -3294,6 +3377,10 @@ int dw_mci_probe(struct dw_mci *host)
host->biu_clk = devm_clk_get(host->dev, "biu");
if (IS_ERR(host->biu_clk)) {
dev_dbg(host->dev, "biu clock not available\n");
+ ret = PTR_ERR(host->biu_clk);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
} else {
ret = clk_prepare_enable(host->biu_clk);
if (ret) {
@@ -3305,6 +3392,10 @@ int dw_mci_probe(struct dw_mci *host)
host->ciu_clk = devm_clk_get(host->dev, "ciu");
if (IS_ERR(host->ciu_clk)) {
dev_dbg(host->dev, "ciu clock not available\n");
+ ret = PTR_ERR(host->ciu_clk);
+ if (ret == -EPROBE_DEFER)
+ goto err_clk_biu;
+
host->bus_hz = host->pdata->bus_hz;
} else {
ret = clk_prepare_enable(host->ciu_clk);
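
The added checks make the otherwise optional biu/ciu clocks still propagate -EPROBE_DEFER, so the driver retries once the clock provider shows up instead of silently running without the clock. A hedged sketch of that pattern with illustrative names:

#include <linux/clk.h>
#include <linux/err.h>

static struct clk *example_get_optional_clk(struct device *dev, const char *name)
{
	struct clk *clk = devm_clk_get(dev, name);

	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return clk;	/* provider not ready yet: defer probing */
		clk = NULL;		/* truly absent: treat the clock as optional */
	}
	return clk;
}

devm_clk_get_optional() offers similar semantics for clocks that may legitimately be absent, returning NULL instead of an error in that case.
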
@@ -3366,8 +3457,13 @@ int dw_mci_probe(struct dw_mci *host)
width = 16;
host->data_shift = 1;
} else if (i == 2) {
- host->push_data = dw_mci_push_data64;
- host->pull_data = dw_mci_pull_data64;
+ if ((host->quirks & DW_MMC_QUIRK_FIFO64_32)) {
+ host->push_data = dw_mci_push_data64_32;
+ host->pull_data = dw_mci_pull_data64_32;
+ } else {
+ host->push_data = dw_mci_push_data64;
+ host->pull_data = dw_mci_pull_data64;
+ }
width = 64;
host->data_shift = 3;
} else {
@@ -3436,7 +3532,7 @@ int dw_mci_probe(struct dw_mci *host)
else
host->fifo_reg = host->regs + DATA_240A_OFFSET;
- tasklet_setup(&host->tasklet, dw_mci_tasklet_func);
+ INIT_WORK(&host->bh_work, dw_mci_work_func);
ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
host->irq_flags, "dw-mci", host);
if (ret)
@@ -3520,7 +3616,7 @@ int dw_mci_runtime_suspend(struct device *dev)
clk_disable_unprepare(host->ciu_clk);
if (host->slot &&
- (mmc_can_gpio_cd(host->slot->mmc) ||
+ (mmc_host_can_gpio_cd(host->slot->mmc) ||
!mmc_card_is_removable(host->slot->mmc)))
clk_disable_unprepare(host->biu_clk);
@@ -3534,7 +3630,7 @@ int dw_mci_runtime_resume(struct device *dev)
struct dw_mci *host = dev_get_drvdata(dev);
if (host->slot &&
- (mmc_can_gpio_cd(host->slot->mmc) ||
+ (mmc_host_can_gpio_cd(host->slot->mmc) ||
!mmc_card_is_removable(host->slot->mmc))) {
ret = clk_prepare_enable(host->biu_clk);
if (ret)
@@ -3588,7 +3684,7 @@ int dw_mci_runtime_resume(struct device *dev)
err:
if (host->slot &&
- (mmc_can_gpio_cd(host->slot->mmc) ||
+ (mmc_host_can_gpio_cd(host->slot->mmc) ||
!mmc_card_is_removable(host->slot->mmc)))
clk_disable_unprepare(host->biu_clk);