Diffstat (limited to 'drivers/mmc/host/sunxi-mmc.c')
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c  |  562
1 file changed, 390 insertions(+), 172 deletions(-)
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 0fb4e4c119e1..8dbcff53a631 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1,47 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for sunxi SD/MMC host controllers * (C) Copyright 2007-2011 Reuuimlla Technology Co., Ltd. * (C) Copyright 2007-2011 Aaron Maoye <leafy.myeh@reuuimllatech.com> * (C) Copyright 2013-2014 O2S GmbH <www.o2s.ch> - * (C) Copyright 2013-2014 David Lanzend�rfer <david.lanzendoerfer@o2s.ch> + * (C) Copyright 2013-2014 David Lanzendörfer <david.lanzendoerfer@o2s.ch> * (C) Copyright 2013-2014 Hans de Goede <hdegoede@redhat.com> * (C) Copyright 2017 Sootech SA - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of - * the License, or (at your option) any later version. */ -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/io.h> -#include <linux/device.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/err.h> - #include <linux/clk.h> -#include <linux/gpio.h> -#include <linux/platform_device.h> -#include <linux/spinlock.h> -#include <linux/scatterlist.h> +#include <linux/clk/sunxi-ng.h> +#include <linux/delay.h> +#include <linux/device.h> #include <linux/dma-mapping.h> -#include <linux/slab.h> -#include <linux/reset.h> -#include <linux/regulator/consumer.h> - -#include <linux/of_address.h> -#include <linux/of_gpio.h> -#include <linux/of_platform.h> - +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mmc/card.h> +#include <linux/mmc/core.h> #include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> #include <linux/mmc/sd.h> #include <linux/mmc/sdio.h> -#include <linux/mmc/mmc.h> -#include <linux/mmc/core.h> -#include <linux/mmc/card.h> #include <linux/mmc/slot-gpio.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/spinlock.h> /* register offset definitions */ #define SDXC_REG_GCTRL (0x00) /* SMC Global Control Register */ @@ -251,6 +245,7 @@ struct sunxi_idma_des { struct sunxi_mmc_cfg { u32 idma_des_size_bits; + u32 idma_des_shift; const struct sunxi_mmc_clk_delay *clk_delays; /* does the IP block support autocalibration? */ @@ -259,10 +254,20 @@ struct sunxi_mmc_cfg { /* Does DATA0 needs to be masked while the clock is updated */ bool mask_data0; + /* + * hardware only supports new timing mode, either due to lack of + * a mode switch in the clock controller, or the mmc controller + * is permanently configured in the new timing mode, without the + * NTSR mode switch. 
+ */ bool needs_new_timings; + + /* clock hardware can switch between old and new timing modes */ + bool ccu_has_timings_switch; }; struct sunxi_mmc_host { + struct device *dev; struct mmc_host *mmc; struct reset_control *reset; const struct sunxi_mmc_cfg *cfg; @@ -293,6 +298,9 @@ struct sunxi_mmc_host { /* vqmmc */ bool vqmmc_enabled; + + /* timings */ + bool use_new_timings; }; static int sunxi_mmc_reset_host(struct sunxi_mmc_host *host) @@ -313,10 +321,9 @@ static int sunxi_mmc_reset_host(struct sunxi_mmc_host *host) return 0; } -static int sunxi_mmc_init_host(struct mmc_host *mmc) +static int sunxi_mmc_init_host(struct sunxi_mmc_host *host) { u32 rval; - struct sunxi_mmc_host *host = mmc_priv(mmc); if (sunxi_mmc_reset_host(host)) return -EIO; @@ -338,7 +345,7 @@ static int sunxi_mmc_init_host(struct mmc_host *mmc) /* Enable CEATA support */ mmc_writel(host, REG_FUNS, SDXC_CEATA_ON); /* Set DMA descriptor list base address */ - mmc_writel(host, REG_DLBA, host->sg_dma); + mmc_writel(host, REG_DLBA, host->sg_dma >> host->cfg->idma_des_shift); rval = mmc_readl(host, REG_GCTRL); rval |= SDXC_INTERRUPT_ENABLE_BIT; @@ -368,8 +375,11 @@ static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host, next_desc += sizeof(struct sunxi_idma_des); pdes[i].buf_addr_ptr1 = - cpu_to_le32(sg_dma_address(&data->sg[i])); - pdes[i].buf_addr_ptr2 = cpu_to_le32((u32)next_desc); + cpu_to_le32(sg_dma_address(&data->sg[i]) >> + host->cfg->idma_des_shift); + pdes[i].buf_addr_ptr2 = + cpu_to_le32(next_desc >> + host->cfg->idma_des_shift); } pdes[0].config |= cpu_to_le32(SDXC_IDMAC_DES0_FD); @@ -714,6 +724,11 @@ static int sunxi_mmc_clk_set_phase(struct sunxi_mmc_host *host, { int index; + /* clk controller delays not used under new timings mode */ + if (host->use_new_timings) + return 0; + + /* some old controllers don't support delays */ if (!host->cfg->clk_delays) return 0; @@ -747,7 +762,7 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, { struct mmc_host *mmc = host->mmc; long rate; - u32 rval, clock = ios->clock; + u32 rval, clock = ios->clock, div = 1; int ret; ret = sunxi_mmc_oclk_onoff(host, 0); @@ -760,10 +775,30 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, if (!ios->clock) return 0; - /* 8 bit DDR requires a higher module clock */ + /* + * Under the old timing mode, 8 bit DDR requires the module + * clock to be double the card clock. Under the new timing + * mode, all DDR modes require a doubled module clock. + * + * We currently only support the standard MMC DDR52 mode. + * This block should be updated once support for other DDR + * modes is added. 
+ */ if (ios->timing == MMC_TIMING_MMC_DDR52 && - ios->bus_width == MMC_BUS_WIDTH_8) + (host->use_new_timings || + ios->bus_width == MMC_BUS_WIDTH_8)) { + div = 2; clock <<= 1; + } + + if (host->use_new_timings && host->cfg->ccu_has_timings_switch) { + ret = sunxi_ccu_set_mmc_timing_mode(host->clk_mmc, true); + if (ret) { + dev_err(mmc_dev(mmc), + "error setting new timing mode\n"); + return ret; + } + } rate = clk_round_rate(host->clk_mmc, clock); if (rate < 0) { @@ -782,24 +817,29 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, return ret; } - /* clear internal divider */ + /* set internal divider */ rval = mmc_readl(host, REG_CLKCR); rval &= ~0xff; - /* set internal divider for 8 bit eMMC DDR, so card clock is right */ - if (ios->timing == MMC_TIMING_MMC_DDR52 && - ios->bus_width == MMC_BUS_WIDTH_8) { - rval |= 1; - rate >>= 1; - } + rval |= div - 1; mmc_writel(host, REG_CLKCR, rval); - if (host->cfg->needs_new_timings) { + /* update card clock rate to account for internal divider */ + rate /= div; + + /* + * Configure the controller to use the new timing mode if needed. + * On controllers that only support the new timing mode, such as + * the eMMC controller on the A64, this register does not exist, + * and any writes to it are ignored. + */ + if (host->use_new_timings) { /* Don't touch the delay bits */ rval = mmc_readl(host, REG_SD_NTSR); rval |= SDXC_2X_TIMING_MODE; mmc_writel(host, REG_SD_NTSR, rval); } + /* sunxi_mmc_clk_set_phase expects the actual card clock rate */ ret = sunxi_mmc_clk_set_phase(host, ios, rate); if (ret) return ret; @@ -826,17 +866,48 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, return 0; } -static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +static void sunxi_mmc_set_bus_width(struct sunxi_mmc_host *host, + unsigned char width) +{ + switch (width) { + case MMC_BUS_WIDTH_1: + mmc_writel(host, REG_WIDTH, SDXC_WIDTH1); + break; + case MMC_BUS_WIDTH_4: + mmc_writel(host, REG_WIDTH, SDXC_WIDTH4); + break; + case MMC_BUS_WIDTH_8: + mmc_writel(host, REG_WIDTH, SDXC_WIDTH8); + break; + } +} + +static void sunxi_mmc_set_clk(struct sunxi_mmc_host *host, struct mmc_ios *ios) { - struct sunxi_mmc_host *host = mmc_priv(mmc); u32 rval; - /* Set the power state */ - switch (ios->power_mode) { - case MMC_POWER_ON: - break; + /* set ddr mode */ + rval = mmc_readl(host, REG_GCTRL); + if (ios->timing == MMC_TIMING_UHS_DDR50 || + ios->timing == MMC_TIMING_MMC_DDR52) + rval |= SDXC_DDR_MODE; + else + rval &= ~SDXC_DDR_MODE; + mmc_writel(host, REG_GCTRL, rval); + + host->ferror = sunxi_mmc_clk_set_rate(host, ios); + /* Android code had a usleep_range(50000, 55000); here */ +} + +static void sunxi_mmc_card_power(struct sunxi_mmc_host *host, + struct mmc_ios *ios) +{ + struct mmc_host *mmc = host->mmc; + switch (ios->power_mode) { case MMC_POWER_UP: + dev_dbg(mmc_dev(mmc), "Powering card up\n"); + if (!IS_ERR(mmc->supply.vmmc)) { host->ferror = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, @@ -854,60 +925,44 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } host->vqmmc_enabled = true; } - - host->ferror = sunxi_mmc_init_host(mmc); - if (host->ferror) - return; - - dev_dbg(mmc_dev(mmc), "power on!\n"); break; case MMC_POWER_OFF: - dev_dbg(mmc_dev(mmc), "power off!\n"); - sunxi_mmc_reset_host(host); + dev_dbg(mmc_dev(mmc), "Powering card off\n"); + if (!IS_ERR(mmc->supply.vmmc)) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) 
regulator_disable(mmc->supply.vqmmc); + host->vqmmc_enabled = false; break; - } - /* set bus width */ - switch (ios->bus_width) { - case MMC_BUS_WIDTH_1: - mmc_writel(host, REG_WIDTH, SDXC_WIDTH1); - break; - case MMC_BUS_WIDTH_4: - mmc_writel(host, REG_WIDTH, SDXC_WIDTH4); - break; - case MMC_BUS_WIDTH_8: - mmc_writel(host, REG_WIDTH, SDXC_WIDTH8); + default: + dev_dbg(mmc_dev(mmc), "Ignoring unknown card power state\n"); break; } +} - /* set ddr mode */ - rval = mmc_readl(host, REG_GCTRL); - if (ios->timing == MMC_TIMING_UHS_DDR50 || - ios->timing == MMC_TIMING_MMC_DDR52) - rval |= SDXC_DDR_MODE; - else - rval &= ~SDXC_DDR_MODE; - mmc_writel(host, REG_GCTRL, rval); +static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sunxi_mmc_host *host = mmc_priv(mmc); - /* set up clock */ - if (ios->power_mode) { - host->ferror = sunxi_mmc_clk_set_rate(host, ios); - /* Android code had a usleep_range(50000, 55000); here */ - } + sunxi_mmc_card_power(host, ios); + sunxi_mmc_set_bus_width(host, ios->bus_width); + sunxi_mmc_set_clk(host, ios); } static int sunxi_mmc_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) { + int ret; + /* vqmmc regulator is available */ - if (!IS_ERR(mmc->supply.vqmmc)) - return mmc_regulator_set_vqmmc(mmc, ios); + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = mmc_regulator_set_vqmmc(mmc, ios); + return ret < 0 ? ret : 0; + } /* no vqmmc regulator, assume fixed regulator at 3/3.3V */ if (mmc->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) @@ -922,6 +977,9 @@ static void sunxi_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) unsigned long flags; u32 imask; + if (enable) + pm_runtime_get_noresume(host->dev); + spin_lock_irqsave(&host->lock, flags); imask = mmc_readl(host, REG_IMASK); @@ -934,6 +992,9 @@ static void sunxi_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) } mmc_writel(host, REG_IMASK, imask); spin_unlock_irqrestore(&host->lock, flags); + + if (!enable) + pm_runtime_put_noidle(host->mmc->parent); } static void sunxi_mmc_hw_reset(struct mmc_host *mmc) @@ -1048,14 +1109,14 @@ static int sunxi_mmc_card_busy(struct mmc_host *mmc) return !!(mmc_readl(host, REG_STAS) & SDXC_CARD_DATA_BUSY); } -static struct mmc_host_ops sunxi_mmc_ops = { +static const struct mmc_host_ops sunxi_mmc_ops = { .request = sunxi_mmc_request, .set_ios = sunxi_mmc_set_ios, .get_ro = mmc_gpio_get_ro, .get_cd = mmc_gpio_get_cd, .enable_sdio_irq = sunxi_mmc_enable_sdio_irq, .start_signal_voltage_switch = sunxi_mmc_volt_switch, - .hw_reset = sunxi_mmc_hw_reset, + .card_hw_reset = sunxi_mmc_hw_reset, .card_busy = sunxi_mmc_card_busy, }; @@ -1094,12 +1155,27 @@ static const struct sunxi_mmc_cfg sun7i_a20_cfg = { .can_calibrate = false, }; +static const struct sunxi_mmc_cfg sun8i_a83t_emmc_cfg = { + .idma_des_size_bits = 16, + .clk_delays = sunxi_mmc_clk_delays, + .can_calibrate = false, + .ccu_has_timings_switch = true, +}; + static const struct sunxi_mmc_cfg sun9i_a80_cfg = { .idma_des_size_bits = 16, .clk_delays = sun9i_mmc_clk_delays, .can_calibrate = false, }; +static const struct sunxi_mmc_cfg sun20i_d1_cfg = { + .idma_des_size_bits = 13, + .idma_des_shift = 2, + .can_calibrate = true, + .mask_data0 = true, + .needs_new_timings = true, +}; + static const struct sunxi_mmc_cfg sun50i_a64_cfg = { .idma_des_size_bits = 16, .clk_delays = NULL, @@ -1112,19 +1188,115 @@ static const struct sunxi_mmc_cfg sun50i_a64_emmc_cfg = { .idma_des_size_bits = 13, .clk_delays = NULL, .can_calibrate = true, + .needs_new_timings = true, +}; + +static const struct sunxi_mmc_cfg 
sun50i_h616_cfg = { + .idma_des_size_bits = 16, + .idma_des_shift = 2, + .can_calibrate = true, + .mask_data0 = true, + .needs_new_timings = true, +}; + +static const struct sunxi_mmc_cfg sun50i_a100_emmc_cfg = { + .idma_des_size_bits = 13, + .idma_des_shift = 2, + .clk_delays = NULL, + .can_calibrate = true, + .needs_new_timings = true, }; static const struct of_device_id sunxi_mmc_of_match[] = { { .compatible = "allwinner,sun4i-a10-mmc", .data = &sun4i_a10_cfg }, { .compatible = "allwinner,sun5i-a13-mmc", .data = &sun5i_a13_cfg }, { .compatible = "allwinner,sun7i-a20-mmc", .data = &sun7i_a20_cfg }, + { .compatible = "allwinner,sun8i-a83t-emmc", .data = &sun8i_a83t_emmc_cfg }, { .compatible = "allwinner,sun9i-a80-mmc", .data = &sun9i_a80_cfg }, + { .compatible = "allwinner,sun20i-d1-mmc", .data = &sun20i_d1_cfg }, { .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg }, { .compatible = "allwinner,sun50i-a64-emmc", .data = &sun50i_a64_emmc_cfg }, + { .compatible = "allwinner,sun50i-a100-mmc", .data = &sun20i_d1_cfg }, + { .compatible = "allwinner,sun50i-a100-emmc", .data = &sun50i_a100_emmc_cfg }, + { .compatible = "allwinner,sun50i-h616-mmc", .data = &sun50i_h616_cfg }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match); +static int sunxi_mmc_enable(struct sunxi_mmc_host *host) +{ + int ret; + + if (!IS_ERR(host->reset)) { + ret = reset_control_reset(host->reset); + if (ret) { + dev_err(host->dev, "Couldn't reset the MMC controller (%d)\n", + ret); + return ret; + } + } + + ret = clk_prepare_enable(host->clk_ahb); + if (ret) { + dev_err(host->dev, "Couldn't enable the bus clocks (%d)\n", ret); + goto error_assert_reset; + } + + ret = clk_prepare_enable(host->clk_mmc); + if (ret) { + dev_err(host->dev, "Enable mmc clk err %d\n", ret); + goto error_disable_clk_ahb; + } + + ret = clk_prepare_enable(host->clk_output); + if (ret) { + dev_err(host->dev, "Enable output clk err %d\n", ret); + goto error_disable_clk_mmc; + } + + ret = clk_prepare_enable(host->clk_sample); + if (ret) { + dev_err(host->dev, "Enable sample clk err %d\n", ret); + goto error_disable_clk_output; + } + + /* + * Sometimes the controller asserts the irq on boot for some reason, + * make sure the controller is in a sane state before enabling irqs. 
+ */ + ret = sunxi_mmc_reset_host(host); + if (ret) + goto error_disable_clk_sample; + + return 0; + +error_disable_clk_sample: + clk_disable_unprepare(host->clk_sample); +error_disable_clk_output: + clk_disable_unprepare(host->clk_output); +error_disable_clk_mmc: + clk_disable_unprepare(host->clk_mmc); +error_disable_clk_ahb: + clk_disable_unprepare(host->clk_ahb); +error_assert_reset: + if (!IS_ERR(host->reset)) + reset_control_assert(host->reset); + return ret; +} + +static void sunxi_mmc_disable(struct sunxi_mmc_host *host) +{ + sunxi_mmc_reset_host(host); + + clk_disable_unprepare(host->clk_sample); + clk_disable_unprepare(host->clk_output); + clk_disable_unprepare(host->clk_mmc); + clk_disable_unprepare(host->clk_ahb); + + if (!IS_ERR(host->reset)) + reset_control_assert(host->reset); +} + static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, struct platform_device *pdev) { @@ -1135,14 +1307,10 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, return -EINVAL; ret = mmc_regulator_get_supply(host->mmc); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, "Could not get vmmc supply\n"); + if (ret) return ret; - } - host->reg_base = devm_ioremap_resource(&pdev->dev, - platform_get_resource(pdev, IORESOURCE_MEM, 0)); + host->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->reg_base)) return PTR_ERR(host->reg_base); @@ -1172,65 +1340,26 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, } } - host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb"); + host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev, + "ahb"); if (PTR_ERR(host->reset) == -EPROBE_DEFER) return PTR_ERR(host->reset); - ret = clk_prepare_enable(host->clk_ahb); - if (ret) { - dev_err(&pdev->dev, "Enable ahb clk err %d\n", ret); + ret = sunxi_mmc_enable(host); + if (ret) return ret; - } - - ret = clk_prepare_enable(host->clk_mmc); - if (ret) { - dev_err(&pdev->dev, "Enable mmc clk err %d\n", ret); - goto error_disable_clk_ahb; - } - ret = clk_prepare_enable(host->clk_output); - if (ret) { - dev_err(&pdev->dev, "Enable output clk err %d\n", ret); - goto error_disable_clk_mmc; - } - - ret = clk_prepare_enable(host->clk_sample); - if (ret) { - dev_err(&pdev->dev, "Enable sample clk err %d\n", ret); - goto error_disable_clk_output; - } - - if (!IS_ERR(host->reset)) { - ret = reset_control_deassert(host->reset); - if (ret) { - dev_err(&pdev->dev, "reset err %d\n", ret); - goto error_disable_clk_sample; - } + host->irq = platform_get_irq(pdev, 0); + if (host->irq < 0) { + ret = host->irq; + goto error_disable_mmc; } - /* - * Sometimes the controller asserts the irq on boot for some reason, - * make sure the controller is in a sane state before enabling irqs. 
- */ - ret = sunxi_mmc_reset_host(host); - if (ret) - goto error_assert_reset; - - host->irq = platform_get_irq(pdev, 0); return devm_request_threaded_irq(&pdev->dev, host->irq, sunxi_mmc_irq, sunxi_mmc_handle_manual_stop, 0, "sunxi-mmc", host); -error_assert_reset: - if (!IS_ERR(host->reset)) - reset_control_assert(host->reset); -error_disable_clk_sample: - clk_disable_unprepare(host->clk_sample); -error_disable_clk_output: - clk_disable_unprepare(host->clk_output); -error_disable_clk_mmc: - clk_disable_unprepare(host->clk_mmc); -error_disable_clk_ahb: - clk_disable_unprepare(host->clk_ahb); +error_disable_mmc: + sunxi_mmc_disable(host); return ret; } @@ -1240,26 +1369,49 @@ static int sunxi_mmc_probe(struct platform_device *pdev) struct mmc_host *mmc; int ret; - mmc = mmc_alloc_host(sizeof(struct sunxi_mmc_host), &pdev->dev); - if (!mmc) { - dev_err(&pdev->dev, "mmc alloc host failed\n"); - return -ENOMEM; - } + mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(*host)); + if (!mmc) + return dev_err_probe(&pdev->dev, -ENOMEM, + "mmc alloc host failed\n"); + platform_set_drvdata(pdev, mmc); host = mmc_priv(mmc); + host->dev = &pdev->dev; host->mmc = mmc; spin_lock_init(&host->lock); ret = sunxi_mmc_resource_request(host, pdev); if (ret) - goto error_free_host; + return ret; host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL); - if (!host->sg_cpu) { - dev_err(&pdev->dev, "Failed to allocate DMA descriptor mem\n"); - ret = -ENOMEM; - goto error_free_host; + if (!host->sg_cpu) + return dev_err_probe(&pdev->dev, -ENOMEM, + "Failed to allocate DMA descriptor mem\n"); + + if (host->cfg->ccu_has_timings_switch) { + /* + * Supports both old and new timing modes. + * Try setting the clk to new timing mode. + */ + sunxi_ccu_set_mmc_timing_mode(host->clk_mmc, true); + + /* And check the result */ + ret = sunxi_ccu_get_mmc_timing_mode(host->clk_mmc); + if (ret < 0) { + /* + * For whatever reason we were not able to get + * the current active mode. Default to old mode. + */ + dev_warn(&pdev->dev, "MMC clk timing mode unknown\n"); + host->use_new_timings = false; + } else { + host->use_new_timings = !!ret; + } + } else if (host->cfg->needs_new_timings) { + /* Supports new timing mode only */ + host->use_new_timings = true; } mmc->ops = &sunxi_mmc_ops; @@ -1272,57 +1424,123 @@ static int sunxi_mmc_probe(struct platform_device *pdev) mmc->f_min = 400000; mmc->f_max = 52000000; mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | - MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; + MMC_CAP_SDIO_IRQ; - if (host->cfg->clk_delays) - mmc->caps |= MMC_CAP_1_8V_DDR; + /* + * Some H5 devices do not have signal traces precise enough to + * use HS DDR mode for their eMMC chips. + * + * We still enable HS DDR modes for all the other controller + * variants that support them. + */ + if ((host->cfg->clk_delays || host->use_new_timings) && + !of_device_is_compatible(pdev->dev.of_node, + "allwinner,sun50i-h5-emmc")) + mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; ret = mmc_of_parse(mmc); if (ret) goto error_free_dma; + /* + * If we don't support delay chains in the SoC, we can't use any + * of the higher speed modes. Mask them out in case the device + * tree specifies the properties for them, which gets added to + * the caps by mmc_of_parse() above. 
+ */ + if (!(host->cfg->clk_delays || host->use_new_timings)) { + mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | + MMC_CAP_1_2V_DDR | MMC_CAP_UHS); + mmc->caps2 &= ~MMC_CAP2_HS200; + } + + /* TODO: This driver doesn't support HS400 mode yet */ + mmc->caps2 &= ~MMC_CAP2_HS400; + + ret = sunxi_mmc_init_host(host); + if (ret) + goto error_free_dma; + + pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + ret = mmc_add_host(mmc); if (ret) goto error_free_dma; - dev_info(&pdev->dev, "base:0x%p irq:%u\n", host->reg_base, host->irq); - platform_set_drvdata(pdev, mmc); + dev_info(&pdev->dev, "initialized, max. request size: %u KB%s\n", + mmc->max_req_size >> 10, + host->use_new_timings ? ", uses new timings mode" : ""); + return 0; error_free_dma: dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); -error_free_host: - mmc_free_host(mmc); return ret; } -static int sunxi_mmc_remove(struct platform_device *pdev) +static void sunxi_mmc_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); struct sunxi_mmc_host *host = mmc_priv(mmc); mmc_remove_host(mmc); - disable_irq(host->irq); - sunxi_mmc_reset_host(host); + pm_runtime_disable(&pdev->dev); + if (!pm_runtime_status_suspended(&pdev->dev)) { + disable_irq(host->irq); + sunxi_mmc_disable(host); + } + dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); +} - if (!IS_ERR(host->reset)) - reset_control_assert(host->reset); +static int sunxi_mmc_runtime_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct sunxi_mmc_host *host = mmc_priv(mmc); + int ret; - clk_disable_unprepare(host->clk_sample); - clk_disable_unprepare(host->clk_output); - clk_disable_unprepare(host->clk_mmc); - clk_disable_unprepare(host->clk_ahb); + ret = sunxi_mmc_enable(host); + if (ret) + return ret; - dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); - mmc_free_host(mmc); + sunxi_mmc_init_host(host); + sunxi_mmc_set_bus_width(host, mmc->ios.bus_width); + sunxi_mmc_set_clk(host, &mmc->ios); + enable_irq(host->irq); return 0; } +static int sunxi_mmc_runtime_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct sunxi_mmc_host *host = mmc_priv(mmc); + + /* + * When clocks are off, it's possible receiving + * fake interrupts, which will stall the system. + * Disabling the irq will prevent this. + */ + disable_irq(host->irq); + sunxi_mmc_reset_host(host); + sunxi_mmc_disable(host); + + return 0; +} + +static const struct dev_pm_ops sunxi_mmc_pm_ops = { + SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) + RUNTIME_PM_OPS(sunxi_mmc_runtime_suspend, sunxi_mmc_runtime_resume, NULL) +}; + static struct platform_driver sunxi_mmc_driver = { .driver = { .name = "sunxi-mmc", - .of_match_table = of_match_ptr(sunxi_mmc_of_match), + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = sunxi_mmc_of_match, + .pm = pm_ptr(&sunxi_mmc_pm_ops), }, .probe = sunxi_mmc_probe, .remove = sunxi_mmc_remove, @@ -1331,5 +1549,5 @@ module_platform_driver(sunxi_mmc_driver); MODULE_DESCRIPTION("Allwinner's SD/MMC Card Controller Driver"); MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("David Lanzend�rfer <david.lanzendoerfer@o2s.ch>"); +MODULE_AUTHOR("David Lanzendörfer <david.lanzendoerfer@o2s.ch>"); MODULE_ALIAS("platform:sunxi-mmc"); |
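A recurring detail in this diff is the new idma_des_shift config field: on the D1, A100 and H616 the DMA descriptor registers (REG_DLBA and the buf_addr_ptr1/buf_addr_ptr2 descriptor words) take the physical address shifted right by two bits, i.e. a word address, whereas the older controllers take plain byte addresses. The standalone sketch below (plain C with hypothetical names, endianness conversion omitted) illustrates the address encoding and descriptor chaining idea that the patch implements with host->cfg->idma_des_shift; it is not driver code.

```c
#include <stdint.h>
#include <stddef.h>

/*
 * Sketch only: "shift" is 0 on older SoCs (byte addresses) and 2 on
 * D1/A100/H616, where the hardware expects word addresses so the
 * 32-bit fields can reach memory above the 4 GiB boundary.
 */
struct idma_des {
	uint32_t config;
	uint32_t buf_size;
	uint32_t buf_addr_ptr1;	/* data buffer, possibly word-addressed */
	uint32_t buf_addr_ptr2;	/* next descriptor, possibly word-addressed */
};

static uint32_t idma_encode_addr(uint64_t dma_addr, unsigned int shift)
{
	/* the address must be aligned to (1 << shift) for this to be lossless */
	return (uint32_t)(dma_addr >> shift);
}

static void idma_chain(struct idma_des *des, size_t n,
		       uint64_t first_des_dma, unsigned int shift)
{
	for (size_t i = 0; i + 1 < n; i++) {
		uint64_t next = first_des_dma + (i + 1) * sizeof(*des);

		des[i].buf_addr_ptr2 = idma_encode_addr(next, shift);
	}
	des[n - 1].buf_addr_ptr2 = 0;	/* end of chain */
}
```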
