Diffstat (limited to 'drivers/mmc/host/dw_mmc.c')
-rw-r--r--   drivers/mmc/host/dw_mmc.c   392
1 file changed, 310 insertions, 82 deletions
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index d333130d1531..9e74b675e92d 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -17,9 +17,11 @@
 #include <linux/interrupt.h>
 #include <linux/iopoll.h>
 #include <linux/ioport.h>
+#include <linux/ktime.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/prandom.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/stat.h>
@@ -33,7 +35,6 @@
 #include <linux/bitops.h>
 #include <linux/regulator/consumer.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 #include <linux/mmc/slot-gpio.h>
 
 #include "dw_mmc.h"
@@ -174,13 +175,16 @@ static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
        if (!root)
                return;
 
-       debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
-       debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
-       debugfs_create_u32("state", S_IRUSR, root, &host->state);
-       debugfs_create_xul("pending_events", S_IRUSR, root,
+       debugfs_create_file("regs", 0400, root, host, &dw_mci_regs_fops);
+       debugfs_create_file("req", 0400, root, slot, &dw_mci_req_fops);
+       debugfs_create_u32("state", 0400, root, &host->state);
+       debugfs_create_xul("pending_events", 0400, root,
                           &host->pending_events);
-       debugfs_create_xul("completed_events", S_IRUSR, root,
+       debugfs_create_xul("completed_events", 0400, root,
                           &host->completed_events);
+#ifdef CONFIG_FAULT_INJECTION
+       fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc);
+#endif
 }
 #endif /* defined(CONFIG_DEBUG_FS) */
 
@@ -329,8 +333,8 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
            cmdr == MMC_READ_MULTIPLE_BLOCK ||
            cmdr == MMC_WRITE_BLOCK ||
            cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
-           cmdr == MMC_SEND_TUNING_BLOCK ||
-           cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
+           mmc_op_tuning(cmdr) ||
+           cmdr == MMC_GEN_CMD) {
                stop->opcode = MMC_STOP_TRANSMISSION;
                stop->arg = 0;
                stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
@@ -489,7 +493,7 @@ static void dw_mci_dmac_complete_dma(void *arg)
         */
        if (data) {
                set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
-               tasklet_schedule(&host->tasklet);
+               queue_work(system_bh_wq, &host->bh_work);
        }
 }
 
@@ -782,6 +786,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
        int ret = 0;
 
        /* Set external dma config: burst size, burst width */
+       memset(&cfg, 0, sizeof(cfg));
        cfg.dst_addr = host->phy_regs + fifo_offset;
        cfg.src_addr = cfg.dst_addr;
        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -1177,7 +1182,7 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
        /*
         * Use the initial fifoth_val for PIO mode. If wm_algined
         * is set, we set watermark same as data size.
-        * If next issued data may be transfered by DMA mode,
+        * If next issued data may be transferred by DMA mode,
         * prev_blksz should be invalidated.
         */
        if (host->wm_aligned)
@@ -1277,6 +1282,37 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
        mci_writel(host, CTYPE, (slot->ctype << slot->id));
 }
 
+static void dw_mci_set_data_timeout(struct dw_mci *host,
+                                   unsigned int timeout_ns)
+{
+       const struct dw_mci_drv_data *drv_data = host->drv_data;
+       u32 clk_div, tmout;
+       u64 tmp;
+
+       if (drv_data && drv_data->set_data_timeout)
+               return drv_data->set_data_timeout(host, timeout_ns);
+
+       clk_div = (mci_readl(host, CLKDIV) & 0xFF) * 2;
+       if (clk_div == 0)
+               clk_div = 1;
+
+       tmp = DIV_ROUND_UP_ULL((u64)timeout_ns * host->bus_hz, NSEC_PER_SEC);
+       tmp = DIV_ROUND_UP_ULL(tmp, clk_div);
+
+       /* TMOUT[7:0] (RESPONSE_TIMEOUT) */
+       tmout = 0xFF; /* Set maximum */
+
+       /* TMOUT[31:8] (DATA_TIMEOUT) */
+       if (!tmp || tmp > 0xFFFFFF)
+               tmout |= (0xFFFFFF << 8);
+       else
+               tmout |= (tmp & 0xFFFFFF) << 8;
+
+       mci_writel(host, TMOUT, tmout);
+       dev_dbg(host->dev, "timeout_ns: %u => TMOUT[31:8]: %#08x",
+               timeout_ns, tmout >> 8);
+}
+
 static void __dw_mci_start_request(struct dw_mci *host,
                                   struct dw_mci_slot *slot,
                                   struct mmc_command *cmd)
@@ -1297,7 +1333,7 @@ static void __dw_mci_start_request(struct dw_mci *host,
 
        data = cmd->data;
        if (data) {
-               mci_writel(host, TMOUT, 0xFFFFFFFF);
+               dw_mci_set_data_timeout(host, data->timeout_ns);
                mci_writel(host, BYTCNT, data->blksz*data->blocks);
                mci_writel(host, BLKSIZ, data->blksz);
        }
@@ -1325,7 +1361,7 @@ static void __dw_mci_start_request(struct dw_mci *host,
         * is just about to roll over.
         *
         * We do this whole thing under spinlock and only if the
-        * command hasn't already completed (indicating the the irq
+        * command hasn't already completed (indicating the irq
         * already ran so we don't want the timeout).
         */
        spin_lock_irqsave(&host->irq_lock, irqflags);
@@ -1581,6 +1617,7 @@ static void dw_mci_hw_reset(struct mmc_host *mmc)
 {
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
+       const struct dw_mci_drv_data *drv_data = host->drv_data;
        int reset;
 
        if (host->use_dma == TRANS_MODE_IDMAC)
@@ -1590,6 +1627,11 @@ static void dw_mci_hw_reset(struct mmc_host *mmc)
                             SDMMC_CTRL_FIFO_RESET))
                return;
 
+       if (drv_data && drv_data->hw_reset) {
+               drv_data->hw_reset(host);
+               return;
+       }
+
        /*
         * According to eMMC spec, card reset procedure:
         * tRstW >= 1us:   RST_n pulse width
@@ -1605,37 +1647,32 @@ static void dw_mci_hw_reset(struct mmc_host *mmc)
        usleep_range(200, 300);
 }
 
-static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
+static void dw_mci_prepare_sdio_irq(struct dw_mci_slot *slot, bool prepare)
 {
-       struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
+       const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
+       u32 clk_en_a_old;
+       u32 clk_en_a;
 
        /*
         * Low power mode will stop the card clock when idle. According to the
         * description of the CLKENA register we should disable low power mode
         * for SDIO cards if we need SDIO interrupts to work.
         */
-       if (mmc->caps & MMC_CAP_SDIO_IRQ) {
-               const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
-               u32 clk_en_a_old;
-               u32 clk_en_a;
-
-               clk_en_a_old = mci_readl(host, CLKENA);
-               if (card->type == MMC_TYPE_SDIO ||
-                   card->type == MMC_TYPE_SD_COMBO) {
-                       set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
-                       clk_en_a = clk_en_a_old & ~clken_low_pwr;
-               } else {
-                       clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
-                       clk_en_a = clk_en_a_old | clken_low_pwr;
-               }
+       clk_en_a_old = mci_readl(host, CLKENA);
+       if (prepare) {
+               set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+               clk_en_a = clk_en_a_old & ~clken_low_pwr;
+       } else {
+               clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+               clk_en_a = clk_en_a_old | clken_low_pwr;
+       }
 
-               if (clk_en_a != clk_en_a_old) {
-                       mci_writel(host, CLKENA, clk_en_a);
-                       mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
-                                    SDMMC_CMD_PRV_DAT_WAIT, 0);
-               }
+       if (clk_en_a != clk_en_a_old) {
+               mci_writel(host, CLKENA, clk_en_a);
+               mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT,
+                            0);
        }
 }
 
@@ -1663,6 +1700,7 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
 
+       dw_mci_prepare_sdio_irq(slot, enb);
        __dw_mci_enable_sdio_irq(slot, enb);
 
        /* Avoid runtime suspending the device when SDIO IRQ is enabled */
@@ -1778,16 +1816,81 @@ static const struct mmc_host_ops dw_mci_ops = {
        .set_ios                = dw_mci_set_ios,
        .get_ro                 = dw_mci_get_ro,
        .get_cd                 = dw_mci_get_cd,
-       .hw_reset               = dw_mci_hw_reset,
+       .card_hw_reset          = dw_mci_hw_reset,
        .enable_sdio_irq        = dw_mci_enable_sdio_irq,
        .ack_sdio_irq           = dw_mci_ack_sdio_irq,
        .execute_tuning         = dw_mci_execute_tuning,
        .card_busy              = dw_mci_card_busy,
        .start_signal_voltage_switch = dw_mci_switch_voltage,
-       .init_card              = dw_mci_init_card,
        .prepare_hs400_tuning   = dw_mci_prepare_hs400_tuning,
 };
 
+#ifdef CONFIG_FAULT_INJECTION
+static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
+{
+       struct dw_mci *host = container_of(t, struct dw_mci, fault_timer);
+       unsigned long flags;
+
+       spin_lock_irqsave(&host->irq_lock, flags);
+
+       /*
+        * Only inject an error if we haven't already got an error or data over
+        * interrupt.
+        */
+       if (!host->data_status) {
+               host->data_status = SDMMC_INT_DCRC;
+               set_bit(EVENT_DATA_ERROR, &host->pending_events);
+               queue_work(system_bh_wq, &host->bh_work);
+       }
+
+       spin_unlock_irqrestore(&host->irq_lock, flags);
+
+       return HRTIMER_NORESTART;
+}
+
+static void dw_mci_start_fault_timer(struct dw_mci *host)
+{
+       struct mmc_data *data = host->data;
+
+       if (!data || data->blocks <= 1)
+               return;
+
+       if (!should_fail(&host->fail_data_crc, 1))
+               return;
+
+       /*
+        * Try to inject the error at random points during the data transfer.
+        */
+       hrtimer_start(&host->fault_timer,
+                     ms_to_ktime(get_random_u32_below(25)),
+                     HRTIMER_MODE_REL);
+}
+
+static void dw_mci_stop_fault_timer(struct dw_mci *host)
+{
+       hrtimer_cancel(&host->fault_timer);
+}
+
+static void dw_mci_init_fault(struct dw_mci *host)
+{
+       host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;
+
+       hrtimer_setup(&host->fault_timer, dw_mci_fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+}
+#else
+static void dw_mci_init_fault(struct dw_mci *host)
+{
+}
+
+static void dw_mci_start_fault_timer(struct dw_mci *host)
+{
+}
+
+static void dw_mci_stop_fault_timer(struct dw_mci *host)
+{
+}
+#endif
+
 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
        __releases(&host->lock)
        __acquires(&host->lock)
@@ -1899,12 +2002,16 @@ static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
 
 static void dw_mci_set_drto(struct dw_mci *host)
 {
+       const struct dw_mci_drv_data *drv_data = host->drv_data;
        unsigned int drto_clks;
        unsigned int drto_div;
        unsigned int drto_ms;
        unsigned long irqflags;
 
-       drto_clks = mci_readl(host, TMOUT) >> 8;
+       if (drv_data && drv_data->get_drto_clks)
+               drto_clks = drv_data->get_drto_clks(host);
+       else
+               drto_clks = mci_readl(host, TMOUT) >> 8;
        drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
        if (drto_div == 0)
                drto_div = 1;
@@ -1912,6 +2019,8 @@ static void dw_mci_set_drto(struct dw_mci *host)
        drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
                                   host->bus_hz);
 
+       dev_dbg(host->dev, "drto_ms: %u\n", drto_ms);
+
        /* add a bit spare time */
        drto_ms += 10;
 
@@ -1931,10 +2040,10 @@ static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
         * Really be certain that the timer has stopped.  This is a bit of
         * paranoia and could only really happen if we had really bad
         * interrupt latency and the interrupt routine and timeout were
-        * running concurrently so that the del_timer() in the interrupt
+        * running concurrently so that the timer_delete() in the interrupt
         * handler couldn't run.
         */
-       WARN_ON(del_timer_sync(&host->cto_timer));
+       WARN_ON(timer_delete_sync(&host->cto_timer));
        clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
 
        return true;
@@ -1946,15 +2055,15 @@ static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
                return false;
 
        /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
-       WARN_ON(del_timer_sync(&host->dto_timer));
+       WARN_ON(timer_delete_sync(&host->dto_timer));
        clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
 
        return true;
 }
 
-static void dw_mci_tasklet_func(struct tasklet_struct *t)
+static void dw_mci_work_func(struct work_struct *t)
 {
-       struct dw_mci *host = from_tasklet(host, t, tasklet);
+       struct dw_mci *host = from_work(host, t, bh_work);
        struct mmc_data *data;
        struct mmc_command *cmd;
        struct mmc_request *mrq;
@@ -2009,17 +2118,18 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
                                 * will waste a bit of time (we already know
                                 * the command was bad), it can't cause any
                                 * errors since it's possible it would have
-                                * taken place anyway if this tasklet got
+                                * taken place anyway if this bh work got
                                 * delayed. Allowing the transfer to take place
                                 * avoids races and keeps things simple.
                                 */
-                               if (err != -ETIMEDOUT) {
+                               if (err != -ETIMEDOUT &&
+                                   host->dir_status == DW_MCI_RECV_STATUS) {
                                        state = STATE_SENDING_DATA;
                                        continue;
                                }
 
-                               dw_mci_stop_dma(host);
                                send_stop_abort(host, data);
+                               dw_mci_stop_dma(host);
                                state = STATE_SENDING_STOP;
                                break;
                        }
@@ -2043,10 +2153,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
                         */
                        if (test_and_clear_bit(EVENT_DATA_ERROR,
                                               &host->pending_events)) {
-                               dw_mci_stop_dma(host);
                                if (!(host->data_status & (SDMMC_INT_DRTO |
                                                           SDMMC_INT_EBE)))
                                        send_stop_abort(host, data);
+                               dw_mci_stop_dma(host);
                                state = STATE_DATA_ERROR;
                                break;
                        }
@@ -2079,10 +2189,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
                         */
                        if (test_and_clear_bit(EVENT_DATA_ERROR,
                                               &host->pending_events)) {
-                               dw_mci_stop_dma(host);
                                if (!(host->data_status & (SDMMC_INT_DRTO |
                                                           SDMMC_INT_EBE)))
                                        send_stop_abort(host, data);
+                               dw_mci_stop_dma(host);
                                state = STATE_DATA_ERROR;
                                break;
                        }
@@ -2102,6 +2212,7 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
                                break;
                        }
 
+                       dw_mci_stop_fault_timer(host);
                        host->data = NULL;
                        set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
                        err = dw_mci_data_complete(host, data);
@@ -2151,6 +2262,7 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
                        if (mrq->cmd->error && mrq->data)
                                dw_mci_reset(host);
 
+                       dw_mci_stop_fault_timer(host);
                        host->cmd = NULL;
                        host->data = NULL;
 
@@ -2466,6 +2578,91 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
        }
 }
 
+static void dw_mci_push_data64_32(struct dw_mci *host, void *buf, int cnt)
+{
+       struct mmc_data *data = host->data;
+       int init_cnt = cnt;
+
+       /* try and push anything in the part_buf */
+       if (unlikely(host->part_buf_count)) {
+               int len = dw_mci_push_part_bytes(host, buf, cnt);
+
+               buf += len;
+               cnt -= len;
+
+               if (host->part_buf_count == 8) {
+                       mci_fifo_l_writeq(host->fifo_reg, host->part_buf);
+                       host->part_buf_count = 0;
+               }
+       }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       if (unlikely((unsigned long)buf & 0x7)) {
+               while (cnt >= 8) {
+                       u64 aligned_buf[16];
+                       int len = min(cnt & -8, (int)sizeof(aligned_buf));
+                       int items = len >> 3;
+                       int i;
+                       /* memcpy from input buffer into aligned buffer */
+                       memcpy(aligned_buf, buf, len);
+                       buf += len;
+                       cnt -= len;
+                       /* push data from aligned buffer into fifo */
+                       for (i = 0; i < items; ++i)
+                               mci_fifo_l_writeq(host->fifo_reg, aligned_buf[i]);
+               }
+       } else
+#endif
+       {
+               u64 *pdata = buf;
+
+               for (; cnt >= 8; cnt -= 8)
+                       mci_fifo_l_writeq(host->fifo_reg, *pdata++);
+               buf = pdata;
+       }
+       /* put anything remaining in the part_buf */
+       if (cnt) {
+               dw_mci_set_part_bytes(host, buf, cnt);
+               /* Push data if we have reached the expected data length */
+               if ((data->bytes_xfered + init_cnt) ==
+                   (data->blksz * data->blocks))
+                       mci_fifo_l_writeq(host->fifo_reg, host->part_buf);
+       }
+}
+
+static void dw_mci_pull_data64_32(struct dw_mci *host, void *buf, int cnt)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       if (unlikely((unsigned long)buf & 0x7)) {
+               while (cnt >= 8) {
+                       /* pull data from fifo into aligned buffer */
+                       u64 aligned_buf[16];
+                       int len = min(cnt & -8, (int)sizeof(aligned_buf));
+                       int items = len >> 3;
+                       int i;
+
+                       for (i = 0; i < items; ++i)
+                               aligned_buf[i] = mci_fifo_l_readq(host->fifo_reg);
+
+                       /* memcpy from aligned buffer into output buffer */
+                       memcpy(buf, aligned_buf, len);
+                       buf += len;
+                       cnt -= len;
+               }
+       } else
+#endif
+       {
+               u64 *pdata = buf;
+
+               for (; cnt >= 8; cnt -= 8)
+                       *pdata++ = mci_fifo_l_readq(host->fifo_reg);
+               buf = pdata;
+       }
+       if (cnt) {
+               host->part_buf = mci_fifo_l_readq(host->fifo_reg);
+               dw_mci_pull_final_bytes(host, buf, cnt);
+       }
+}
+
 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
 {
        int len;
@@ -2591,7 +2788,7 @@ done:
 
 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
 {
-       del_timer(&host->cto_timer);
+       timer_delete(&host->cto_timer);
 
        if (!host->cmd_status)
                host->cmd_status = status;
@@ -2599,7 +2796,9 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
 
        smp_wmb(); /* drain writebuffer */
        set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
-       tasklet_schedule(&host->tasklet);
+       queue_work(system_bh_wq, &host->bh_work);
+
+       dw_mci_start_fault_timer(host);
 }
 
 static void dw_mci_handle_cd(struct dw_mci *host)
@@ -2633,13 +2832,13 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
                        dw_mci_cmd_interrupt(host, pending);
                        spin_unlock(&host->irq_lock);
 
-                       del_timer(&host->cmd11_timer);
+                       timer_delete(&host->cmd11_timer);
                }
 
                if (pending & DW_MCI_CMD_ERROR_FLAGS) {
                        spin_lock(&host->irq_lock);
 
-                       del_timer(&host->cto_timer);
+                       timer_delete(&host->cto_timer);
                        mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
                        host->cmd_status = pending;
                        smp_wmb(); /* drain writebuffer */
@@ -2649,18 +2848,31 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
                }
 
                if (pending & DW_MCI_DATA_ERROR_FLAGS) {
+                       spin_lock(&host->irq_lock);
+
+                       if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
+                               timer_delete(&host->dto_timer);
+
                        /* if there is an error report DATA_ERROR */
                        mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
                        host->data_status = pending;
                        smp_wmb(); /* drain writebuffer */
                        set_bit(EVENT_DATA_ERROR, &host->pending_events);
-                       tasklet_schedule(&host->tasklet);
+
+                       if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
+                               /* In case of error, we cannot expect a DTO */
+                               set_bit(EVENT_DATA_COMPLETE,
+                                       &host->pending_events);
+
+                       queue_work(system_bh_wq, &host->bh_work);
+
+                       spin_unlock(&host->irq_lock);
                }
 
                if (pending & SDMMC_INT_DATA_OVER) {
                        spin_lock(&host->irq_lock);
 
-                       del_timer(&host->dto_timer);
+                       timer_delete(&host->dto_timer);
 
                        mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
                        if (!host->data_status)
@@ -2671,7 +2883,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
                                        dw_mci_read_data_pio(host, true);
                        }
                        set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
-                       tasklet_schedule(&host->tasklet);
+                       queue_work(system_bh_wq, &host->bh_work);
 
                        spin_unlock(&host->irq_lock);
                }
@@ -2751,6 +2963,9 @@ static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
        if (host->pdata->pm_caps)
                mmc->pm_caps = host->pdata->pm_caps;
 
+       if (drv_data)
+               mmc->caps |= drv_data->common_caps;
+
        if (host->dev->of_node) {
                ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
                if (ctrl_id < 0)
@@ -2771,7 +2986,12 @@ static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
        if (host->pdata->caps2)
                mmc->caps2 = host->pdata->caps2;
 
-       mmc->f_min = DW_MCI_FREQ_MIN;
+       /* if host has set a minimum_freq, we should respect it */
+       if (host->minimum_speed)
+               mmc->f_min = host->minimum_speed;
+       else
+               mmc->f_min = DW_MCI_FREQ_MIN;
+
        if (!mmc->f_max)
                mmc->f_max = DW_MCI_FREQ_MAX;
 
@@ -2788,7 +3008,7 @@ static int dw_mci_init_slot(struct dw_mci *host)
        struct dw_mci_slot *slot;
        int ret;
 
-       mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
+       mmc = devm_mmc_alloc_host(host->dev, sizeof(*slot));
        if (!mmc)
                return -ENOMEM;
 
@@ -2804,18 +3024,18 @@ static int dw_mci_init_slot(struct dw_mci *host)
        /*if there are external regulators, get them*/
        ret = mmc_regulator_get_supply(mmc);
        if (ret)
-               goto err_host_allocated;
+               return ret;
 
        if (!mmc->ocr_avail)
                mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 
        ret = mmc_of_parse(mmc);
        if (ret)
-               goto err_host_allocated;
+               return ret;
 
        ret = dw_mci_init_slot_caps(slot);
        if (ret)
-               goto err_host_allocated;
+               return ret;
 
        /* Useful defaults if platform data is unset. */
        if (host->use_dma == TRANS_MODE_IDMAC) {
@@ -2845,17 +3065,13 @@ static int dw_mci_init_slot(struct dw_mci *host)
 
        ret = mmc_add_host(mmc);
        if (ret)
-               goto err_host_allocated;
+               return ret;
 
 #if defined(CONFIG_DEBUG_FS)
        dw_mci_init_debugfs(slot);
 #endif
 
        return 0;
-
-err_host_allocated:
-       mmc_free_host(mmc);
-       return ret;
 }
 
@@ -2863,7 +3079,6 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
        /* Debugfs stuff is cleaned up by mmc core */
        mmc_remove_host(slot->mmc);
        slot->host->slot = NULL;
-       mmc_free_host(slot->mmc);
 }
 
 static void dw_mci_init_dma(struct dw_mci *host)
@@ -2905,9 +3120,8 @@ static void dw_mci_init_dma(struct dw_mci *host)
                        host->dma_64bit_address = 1;
                        dev_info(host->dev,
                                 "IDMAC supports 64-bit address mode.\n");
-                       if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
-                               dma_set_coherent_mask(host->dev,
-                                                     DMA_BIT_MASK(64));
+                       if (dma_set_mask_and_coherent(host->dev, DMA_BIT_MASK(64)))
+                               dev_info(host->dev, "Fail to set 64-bit DMA mask");
                } else {
                        /* host supports IDMAC in 32-bit address mode */
                        host->dma_64bit_address = 0;
@@ -2930,8 +3144,7 @@ static void dw_mci_init_dma(struct dw_mci *host)
                dev_info(host->dev, "Using internal DMA controller.\n");
        } else {
                /* TRANS_MODE_EDMAC: check dma bindings again */
-               if ((device_property_read_string_array(dev, "dma-names",
-                                                      NULL, 0) < 0) ||
+               if ((device_property_string_array_count(dev, "dma-names") < 0) ||
                    !device_property_present(dev, "dmas")) {
                        goto no_dma;
                }
@@ -2960,7 +3173,7 @@ no_dma:
 
 static void dw_mci_cmd11_timer(struct timer_list *t)
 {
-       struct dw_mci *host = from_timer(host, t, cmd11_timer);
+       struct dw_mci *host = timer_container_of(host, t, cmd11_timer);
 
        if (host->state != STATE_SENDING_CMD11) {
                dev_warn(host->dev, "Unexpected CMD11 timeout\n");
@@ -2969,12 +3182,12 @@ static void dw_mci_cmd11_timer(struct timer_list *t)
 
        host->cmd_status = SDMMC_INT_RTO;
        set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
-       tasklet_schedule(&host->tasklet);
+       queue_work(system_bh_wq, &host->bh_work);
 }
 
 static void dw_mci_cto_timer(struct timer_list *t)
 {
-       struct dw_mci *host = from_timer(host, t, cto_timer);
+       struct dw_mci *host = timer_container_of(host, t, cto_timer);
        unsigned long irqflags;
        u32 pending;
 
@@ -3015,7 +3228,7 @@ static void dw_mci_cto_timer(struct timer_list *t)
                 */
                host->cmd_status = SDMMC_INT_RTO;
                set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
-               tasklet_schedule(&host->tasklet);
+               queue_work(system_bh_wq, &host->bh_work);
                break;
        default:
                dev_warn(host->dev, "Unexpected command timeout, state %d\n",
@@ -3029,7 +3242,7 @@ exit:
 
 static void dw_mci_dto_timer(struct timer_list *t)
 {
-       struct dw_mci *host = from_timer(host, t, dto_timer);
+       struct dw_mci *host = timer_container_of(host, t, dto_timer);
        unsigned long irqflags;
        u32 pending;
 
@@ -3066,7 +3279,7 @@ static void dw_mci_dto_timer(struct timer_list *t)
                host->data_status = SDMMC_INT_DRTO;
                set_bit(EVENT_DATA_ERROR, &host->pending_events);
                set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
-               tasklet_schedule(&host->tasklet);
+               queue_work(system_bh_wq, &host->bh_work);
                break;
        default:
                dev_warn(host->dev, "Unexpected data timeout, state %d\n",
@@ -3164,6 +3377,10 @@ int dw_mci_probe(struct dw_mci *host)
        host->biu_clk = devm_clk_get(host->dev, "biu");
        if (IS_ERR(host->biu_clk)) {
                dev_dbg(host->dev, "biu clock not available\n");
+               ret = PTR_ERR(host->biu_clk);
+               if (ret == -EPROBE_DEFER)
+                       return ret;
+
        } else {
                ret = clk_prepare_enable(host->biu_clk);
                if (ret) {
@@ -3175,6 +3392,10 @@ int dw_mci_probe(struct dw_mci *host)
        host->ciu_clk = devm_clk_get(host->dev, "ciu");
        if (IS_ERR(host->ciu_clk)) {
                dev_dbg(host->dev, "ciu clock not available\n");
+               ret = PTR_ERR(host->ciu_clk);
+               if (ret == -EPROBE_DEFER)
+                       goto err_clk_biu;
+
                host->bus_hz = host->pdata->bus_hz;
        } else {
                ret = clk_prepare_enable(host->ciu_clk);
@@ -3223,6 +3444,8 @@ int dw_mci_probe(struct dw_mci *host)
        spin_lock_init(&host->irq_lock);
        INIT_LIST_HEAD(&host->queue);
 
+       dw_mci_init_fault(host);
+
        /*
         * Get the host data width - this assumes that HCON has been set with
         * the correct values.
@@ -3234,8 +3457,13 @@ int dw_mci_probe(struct dw_mci *host)
                width = 16;
                host->data_shift = 1;
        } else if (i == 2) {
-               host->push_data = dw_mci_push_data64;
-               host->pull_data = dw_mci_pull_data64;
+               if ((host->quirks & DW_MMC_QUIRK_FIFO64_32)) {
+                       host->push_data = dw_mci_push_data64_32;
+                       host->pull_data = dw_mci_pull_data64_32;
+               } else {
+                       host->push_data = dw_mci_push_data64;
+                       host->pull_data = dw_mci_pull_data64;
+               }
                width = 64;
                host->data_shift = 3;
        } else {
@@ -3304,7 +3532,7 @@ int dw_mci_probe(struct dw_mci *host)
        else
                host->fifo_reg = host->regs + DATA_240A_OFFSET;
 
-       tasklet_setup(&host->tasklet, dw_mci_tasklet_func);
+       INIT_WORK(&host->bh_work, dw_mci_work_func);
        ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
                               host->irq_flags, "dw-mci", host);
        if (ret)
@@ -3388,7 +3616,7 @@ int dw_mci_runtime_suspend(struct device *dev)
        clk_disable_unprepare(host->ciu_clk);
 
        if (host->slot &&
-           (mmc_can_gpio_cd(host->slot->mmc) ||
+           (mmc_host_can_gpio_cd(host->slot->mmc) ||
             !mmc_card_is_removable(host->slot->mmc)))
                clk_disable_unprepare(host->biu_clk);
 
@@ -3402,7 +3630,7 @@ int dw_mci_runtime_resume(struct device *dev)
        struct dw_mci *host = dev_get_drvdata(dev);
 
        if (host->slot &&
-           (mmc_can_gpio_cd(host->slot->mmc) ||
+           (mmc_host_can_gpio_cd(host->slot->mmc) ||
             !mmc_card_is_removable(host->slot->mmc))) {
                ret = clk_prepare_enable(host->biu_clk);
                if (ret)
@@ -3439,7 +3667,7 @@ int dw_mci_runtime_resume(struct device *dev)
 
        mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
 
-       if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
+       if (host->slot && host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
                dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
 
        /* Force setup bus to guarantee available clock output */
@@ -3456,7 +3684,7 @@ int dw_mci_runtime_resume(struct device *dev)
 
 err:
        if (host->slot &&
-           (mmc_can_gpio_cd(host->slot->mmc) ||
+           (mmc_host_can_gpio_cd(host->slot->mmc) ||
             !mmc_card_is_removable(host->slot->mmc)))
                clk_disable_unprepare(host->biu_clk);
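
As a side note on the new timeout handling above: the following standalone C sketch (not part of the patch, and not kernel code) redoes the arithmetic that dw_mci_set_data_timeout() and dw_mci_set_drto() perform, so the TMOUT register value and the software data timeout can be checked against each other. The 200 MHz bus clock, the CLKDIV value of 1 and the 100 ms timeout are assumed example inputs, and div_round_up() is a stand-in for the kernel's DIV_ROUND_UP_ULL().

#include <stdio.h>

/* Round-up division, mirroring the kernel's DIV_ROUND_UP_ULL(). */
static unsigned long long div_round_up(unsigned long long n, unsigned long long d)
{
        return (n + d - 1) / d;
}

int main(void)
{
        /* Illustrative inputs only -- not read from real hardware. */
        unsigned long long bus_hz = 200000000ULL;   /* assumed 200 MHz ciu clock */
        unsigned int clkdiv = 1;                    /* assumed CLKDIV[7:0] value */
        unsigned int timeout_ns = 100000000;        /* assumed 100 ms data timeout */

        /* CLKDIV of 0 means divide-by-1; otherwise the divider is 2 * CLKDIV. */
        unsigned int clk_div = clkdiv * 2;
        if (clk_div == 0)
                clk_div = 1;

        /* dw_mci_set_data_timeout(): ns -> card-clock cycles, clamped to 24 bits. */
        unsigned long long cycles =
                div_round_up((unsigned long long)timeout_ns * bus_hz, 1000000000ULL);
        cycles = div_round_up(cycles, clk_div);

        unsigned int tmout = 0xFF;      /* TMOUT[7:0]: response timeout kept at maximum */
        if (cycles == 0 || cycles > 0xFFFFFF)
                tmout |= 0xFFFFFFu << 8;
        else
                tmout |= ((unsigned int)cycles & 0xFFFFFF) << 8;

        /* dw_mci_set_drto(): cycles -> ms for the software data timeout timer. */
        unsigned int drto_clks = tmout >> 8;
        unsigned int drto_ms =
                (unsigned int)div_round_up(1000ULL * drto_clks * clk_div, bus_hz);
        drto_ms += 10;  /* the driver adds a bit of spare time */

        printf("TMOUT register value: %#010x\n", tmout);
        printf("software DRTO timer:  %u ms\n", drto_ms);
        return 0;
}

With these assumed inputs the sketch prints a TMOUT value of 0x989680ff and a software timeout of 110 ms: the hardware data timeout (10,000,000 card-clock cycles, i.e. 100 ms) and the driver's DRTO timer (100 ms plus 10 ms of slack) stay consistent.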
