Diffstat (limited to 'drivers/mmc/host/mmci.c')
-rw-r--r--  drivers/mmc/host/mmci.c | 1640
1 file changed, 1174 insertions, 466 deletions
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index d1ca2f489054..e500051bd572 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
*
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
* Copyright (C) 2010 ST-Ericsson SA
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -21,15 +18,16 @@
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
+#include <linux/mmc/mmc.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -37,106 +35,77 @@
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/reset.h>
+#include <linux/gpio/consumer.h>
+#include <linux/workqueue.h>
#include <asm/div64.h>
#include <asm/io.h>
#include "mmci.h"
-#include "mmci_qcom_dml.h"
#define DRIVER_NAME "mmci-pl18x"
-static unsigned int fmax = 515633;
+static void mmci_variant_init(struct mmci_host *host);
+static void ux500_variant_init(struct mmci_host *host);
+static void ux500v2_variant_init(struct mmci_host *host);
-/**
- * struct variant_data - MMCI variant-specific quirks
- * @clkreg: default value for MCICLOCK register
- * @clkreg_enable: enable value for MMCICLOCK register
- * @clkreg_8bit_bus_enable: enable value for 8 bit bus
- * @clkreg_neg_edge_enable: enable value for inverted data/cmd output
- * @datalength_bits: number of bits in the MMCIDATALENGTH register
- * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
- * is asserted (likewise for RX)
- * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
- * is asserted (likewise for RX)
- * @data_cmd_enable: enable value for data commands.
- * @st_sdio: enable ST specific SDIO logic
- * @st_clkdiv: true if using a ST-specific clock divider algorithm
- * @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
- * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
- * @blksz_datactrl4: true if Block size is at b4..b16 position in datactrl
- * register
- * @datactrl_mask_sdio: SDIO enable mask in datactrl register
- * @pwrreg_powerup: power up value for MMCIPOWER register
- * @f_max: maximum clk frequency supported by the controller.
- * @signal_direction: input/out direction of bus signals can be indicated
- * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
- * @busy_detect: true if the variant supports busy detection on DAT0.
- * @busy_dpsm_flag: bitmask enabling busy detection in the DPSM
- * @busy_detect_flag: bitmask identifying the bit in the MMCISTATUS register
- * indicating that the card is busy
- * @busy_detect_mask: bitmask identifying the bit in the MMCIMASK0 to mask for
- * getting busy end detection interrupts
- * @pwrreg_nopower: bits in MMCIPOWER don't controls ext. power supply
- * @explicit_mclk_control: enable explicit mclk control in driver.
- * @qcom_fifo: enables qcom specific fifo pio read logic.
- * @qcom_dml: enables qcom specific dma glue for dma transfers.
- * @reversed_irq_handling: handle data irq before cmd irq.
- */
-struct variant_data {
- unsigned int clkreg;
- unsigned int clkreg_enable;
- unsigned int clkreg_8bit_bus_enable;
- unsigned int clkreg_neg_edge_enable;
- unsigned int datalength_bits;
- unsigned int fifosize;
- unsigned int fifohalfsize;
- unsigned int data_cmd_enable;
- unsigned int datactrl_mask_ddrmode;
- unsigned int datactrl_mask_sdio;
- bool st_sdio;
- bool st_clkdiv;
- bool blksz_datactrl16;
- bool blksz_datactrl4;
- u32 pwrreg_powerup;
- u32 f_max;
- bool signal_direction;
- bool pwrreg_clkgate;
- bool busy_detect;
- u32 busy_dpsm_flag;
- u32 busy_detect_flag;
- u32 busy_detect_mask;
- bool pwrreg_nopower;
- bool explicit_mclk_control;
- bool qcom_fifo;
- bool qcom_dml;
- bool reversed_irq_handling;
-};
+static unsigned int fmax = 515633;
static struct variant_data variant_arm = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 16,
+ .datactrl_blocksz = 11,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
.reversed_irq_handling = true,
+ .mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
+ .init = mmci_variant_init,
};
static struct variant_data variant_arm_extended_fifo = {
.fifosize = 128 * 4,
.fifohalfsize = 64 * 4,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 16,
+ .datactrl_blocksz = 11,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
+ .mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
+ .init = mmci_variant_init,
};
static struct variant_data variant_arm_extended_fifo_hwfc = {
.fifosize = 128 * 4,
.fifohalfsize = 64 * 4,
.clkreg_enable = MCI_ARM_HWFCEN,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 16,
+ .datactrl_blocksz = 11,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
+ .mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
+ .init = mmci_variant_init,
};
static struct variant_data variant_u300 = {
@@ -144,7 +113,12 @@ static struct variant_data variant_u300 = {
.fifohalfsize = 8 * 4,
.clkreg_enable = MCI_ST_U300_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 16,
+ .datactrl_blocksz = 11,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.pwrreg_powerup = MCI_PWR_ON,
@@ -152,6 +126,11 @@ static struct variant_data variant_u300 = {
.signal_direction = true,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
+ .init = mmci_variant_init,
};
static struct variant_data variant_nomadik = {
@@ -159,7 +138,12 @@ static struct variant_data variant_nomadik = {
.fifohalfsize = 8 * 4,
.clkreg = MCI_CLK_ENABLE,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 24,
+ .datactrl_blocksz = 11,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -168,6 +152,11 @@ static struct variant_data variant_nomadik = {
.signal_direction = true,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
+ .init = mmci_variant_init,
};
static struct variant_data variant_ux500 = {
@@ -177,7 +166,14 @@ static struct variant_data variant_ux500 = {
.clkreg_enable = MCI_ST_UX500_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 24,
+ .datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
+ .dma_power_of_2 = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -190,6 +186,11 @@ static struct variant_data variant_ux500 = {
.busy_detect_flag = MCI_ST_CARDBUSY,
.busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
+ .init = ux500_variant_init,
};
static struct variant_data variant_ux500v2 = {
@@ -199,12 +200,18 @@ static struct variant_data variant_ux500v2 = {
.clkreg_enable = MCI_ST_UX500_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
.datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
.datalength_bits = 24,
+ .datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
+ .dma_power_of_2 = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
- .blksz_datactrl16 = true,
.pwrreg_powerup = MCI_PWR_ON,
.f_max = 100000000,
.signal_direction = true,
@@ -214,6 +221,122 @@ static struct variant_data variant_ux500v2 = {
.busy_detect_flag = MCI_ST_CARDBUSY,
.busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
+ .init = ux500v2_variant_init,
+};
+
+static struct variant_data variant_stm32 = {
+ .fifosize = 32 * 4,
+ .fifohalfsize = 8 * 4,
+ .clkreg = MCI_CLK_ENABLE,
+ .clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+ .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
+ .datalength_bits = 24,
+ .datactrl_blocksz = 11,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
+ .st_sdio = true,
+ .st_clkdiv = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .f_max = 48000000,
+ .pwrreg_clkgate = true,
+ .pwrreg_nopower = true,
+ .dma_flow_controller = true,
+ .init = mmci_variant_init,
+};
+
+static struct variant_data variant_stm32_sdmmc = {
+ .fifosize = 16 * 4,
+ .fifohalfsize = 8 * 4,
+ .f_max = 208000000,
+ .stm32_clkdiv = true,
+ .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
+ .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
+ .cmdreg_srsp = MCI_CPSM_STM32_SRSP,
+ .cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
+ .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
+ .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
+ .datactrl_first = true,
+ .datacnt_useless = true,
+ .datalength_bits = 25,
+ .datactrl_blocksz = 14,
+ .datactrl_any_blocksz = true,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
+ .stm32_idmabsize_mask = GENMASK(12, 5),
+ .stm32_idmabsize_align = BIT(5),
+ .supports_sdio_irq = true,
+ .busy_timeout = true,
+ .busy_detect = true,
+ .busy_detect_flag = MCI_STM32_BUSYD0,
+ .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
+ .init = sdmmc_variant_init,
+};
+
+static struct variant_data variant_stm32_sdmmcv2 = {
+ .fifosize = 16 * 4,
+ .fifohalfsize = 8 * 4,
+ .f_max = 267000000,
+ .stm32_clkdiv = true,
+ .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
+ .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
+ .cmdreg_srsp = MCI_CPSM_STM32_SRSP,
+ .cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
+ .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
+ .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
+ .datactrl_first = true,
+ .datacnt_useless = true,
+ .datalength_bits = 25,
+ .datactrl_blocksz = 14,
+ .datactrl_any_blocksz = true,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
+ .stm32_idmabsize_mask = GENMASK(16, 5),
+ .stm32_idmabsize_align = BIT(5),
+ .supports_sdio_irq = true,
+ .dma_lli = true,
+ .busy_timeout = true,
+ .busy_detect = true,
+ .busy_detect_flag = MCI_STM32_BUSYD0,
+ .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
+ .init = sdmmc_variant_init,
+};
+
+static struct variant_data variant_stm32_sdmmcv3 = {
+ .fifosize = 256 * 4,
+ .fifohalfsize = 128 * 4,
+ .f_max = 267000000,
+ .stm32_clkdiv = true,
+ .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
+ .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
+ .cmdreg_srsp = MCI_CPSM_STM32_SRSP,
+ .cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
+ .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
+ .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
+ .datactrl_first = true,
+ .datacnt_useless = true,
+ .datalength_bits = 25,
+ .datactrl_blocksz = 14,
+ .datactrl_any_blocksz = true,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
+ .stm32_idmabsize_mask = GENMASK(16, 6),
+ .stm32_idmabsize_align = BIT(6),
+ .supports_sdio_irq = true,
+ .dma_lli = true,
+ .busy_timeout = true,
+ .busy_detect = true,
+ .busy_detect_flag = MCI_STM32_BUSYD0,
+ .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
+ .init = sdmmc_variant_init,
};
static struct variant_data variant_qcom = {
@@ -224,14 +347,24 @@ static struct variant_data variant_qcom = {
MCI_QCOM_CLK_SELECT_IN_FBCLK,
.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
.datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
.data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
- .blksz_datactrl4 = true,
.datalength_bits = 24,
+ .datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 208000000,
.explicit_mclk_control = true,
.qcom_fifo = true,
.qcom_dml = true,
+ .mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
+ .init = qcom_variant_init,
};
/* Busy detection for the ST Micro variant */
@@ -249,24 +382,6 @@ static int mmci_card_busy(struct mmc_host *mmc)
return busy;
}
-/*
- * Validate mmc prerequisites
- */
-static int mmci_validate_data(struct mmci_host *host,
- struct mmc_data *data)
-{
- if (!data)
- return 0;
-
- if (!is_power_of_2(data->blksz)) {
- dev_err(mmc_dev(host->mmc),
- "unsupported block size (%d bytes)\n", data->blksz);
- return -EINVAL;
- }
-
- return 0;
-}
-
static void mmci_reg_delay(struct mmci_host *host)
{
/*
@@ -285,7 +400,7 @@ static void mmci_reg_delay(struct mmci_host *host)
/*
* This must be called with host->lock held
*/
-static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
+void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
if (host->clk_reg != clk) {
host->clk_reg = clk;
@@ -296,7 +411,7 @@ static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
/*
* This must be called with host->lock held
*/
-static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
+void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
if (host->pwr_reg != pwr) {
host->pwr_reg = pwr;
@@ -309,8 +424,9 @@ static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
*/
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
- /* Keep busy mode in DPSM if enabled */
- datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;
+ /* Keep busy mode in DPSM and SDIO mask if enabled */
+ datactrl |= host->datactrl_reg & (host->variant->busy_dpsm_flag |
+ host->variant->datactrl_mask_sdio);
if (host->datactrl_reg != datactrl) {
host->datactrl_reg = datactrl;
@@ -380,6 +496,138 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
mmci_write_clkreg(host, clk);
}
+static void mmci_dma_release(struct mmci_host *host)
+{
+ if (host->ops && host->ops->dma_release)
+ host->ops->dma_release(host);
+
+ host->use_dma = false;
+}
+
+static void mmci_dma_setup(struct mmci_host *host)
+{
+ if (!host->ops || !host->ops->dma_setup)
+ return;
+
+ if (host->ops->dma_setup(host))
+ return;
+
+ /* initialize pre request cookie */
+ host->next_cookie = 1;
+
+ host->use_dma = true;
+}
+
+/*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ struct variant_data *variant = host->variant;
+
+ if (!data)
+ return 0;
+ if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
+ dev_err(mmc_dev(host->mmc),
+ "unsupported block size (%d bytes)\n", data->blksz);
+ return -EINVAL;
+ }
+
+ if (host->ops && host->ops->validate_data)
+ return host->ops->validate_data(host, data);
+
+ return 0;
+}
+
+static int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
+{
+ int err;
+
+ if (!host->ops || !host->ops->prep_data)
+ return 0;
+
+ err = host->ops->prep_data(host, data, next);
+
+ if (next && !err)
+ data->host_cookie = ++host->next_cookie < 0 ?
+ 1 : host->next_cookie;
+
+ return err;
+}
+
+static void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
+ int err)
+{
+ if (host->ops && host->ops->unprep_data)
+ host->ops->unprep_data(host, data, err);
+
+ data->host_cookie = 0;
+}
+
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+ WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);
+
+ if (host->ops && host->ops->get_next_data)
+ host->ops->get_next_data(host, data);
+}
+
+static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
+{
+ struct mmc_data *data = host->data;
+ int ret;
+
+ if (!host->use_dma)
+ return -EINVAL;
+
+ ret = mmci_prep_data(host, data, false);
+ if (ret)
+ return ret;
+
+ if (!host->ops || !host->ops->dma_start)
+ return -EINVAL;
+
+ /* Okay, go for it. */
+ dev_vdbg(mmc_dev(host->mmc),
+ "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+ data->sg_len, data->blksz, data->blocks, data->flags);
+
+ ret = host->ops->dma_start(host, &datactrl);
+ if (ret)
+ return ret;
+
+ /* Trigger the DMA transfer */
+ mmci_write_datactrlreg(host, datactrl);
+
+ /*
+ * Let the MMCI say when the data is ended and it's time
+ * to fire next DMA request. When that happens, MMCI will
+ * call mmci_data_end()
+ */
+ writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+ host->base + MMCIMASK0);
+ return 0;
+}
+
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
+ if (!host->use_dma)
+ return;
+
+ if (host->ops && host->ops->dma_finalize)
+ host->ops->dma_finalize(host, data);
+}
+
+static void mmci_dma_error(struct mmci_host *host)
+{
+ if (!host->use_dma)
+ return;
+
+ if (host->ops && host->ops->dma_error)
+ host->ops->dma_error(host);
+}
+
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
@@ -396,17 +644,21 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
void __iomem *base = host->base;
+ struct variant_data *variant = host->variant;
if (host->singleirq) {
unsigned int mask0 = readl(base + MMCIMASK0);
- mask0 &= ~MCI_IRQ1MASK;
+ mask0 &= ~variant->irq_pio_mask;
mask0 |= mask;
writel(mask0, base + MMCIMASK0);
}
- writel(mask, base + MMCIMASK1);
+ if (variant->mmcimask1)
+ writel(mask, base + MMCIMASK1);
+
+ host->mask1_reg = mask;
}
static void mmci_stop_data(struct mmci_host *host)
@@ -428,38 +680,208 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
+static u32 mmci_get_dctrl_cfg(struct mmci_host *host)
+{
+ return MCI_DPSM_ENABLE | mmci_dctrl_blksz(host);
+}
+
+static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
+{
+ return MCI_DPSM_ENABLE | (host->data->blksz << 16);
+}
+
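A side note, not part of the patch: the two helpers above differ only in how the block size is encoded into MMCIDATACTRL. Assuming mmci_dctrl_blksz() (defined in mmci.h, not shown here) keeps the log2 encoding used by the open-coded logic removed later in this diff, the two encodings reduce to the following standalone sketch:

/*
 * Illustrative sketch only, not part of the patch: the two MMCIDATACTRL
 * block-size encodings. Standard variants program log2(blksz) shifted left
 * by 4; the ux500v2 path programs the raw byte count shifted left by 16.
 */
#include <strings.h>	/* ffs() */
#include <stdint.h>

static uint32_t dctrl_blksz_log2(uint32_t blksz)	/* blksz must be a power of 2 */
{
	return (uint32_t)(ffs(blksz) - 1) << 4;		/* 512 bytes -> 9 << 4 */
}

static uint32_t dctrl_blksz_raw(uint32_t blksz)		/* ux500v2 encoding */
{
	return blksz << 16;				/* 512 bytes -> 512 << 16 */
}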
+static void ux500_busy_clear_mask_done(struct mmci_host *host)
+{
+ void __iomem *base = host->base;
+
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ writel(readl(base + MMCIMASK0) &
+ ~host->variant->busy_detect_mask, base + MMCIMASK0);
+ host->busy_state = MMCI_BUSY_DONE;
+ host->busy_status = 0;
+}
+
+/*
+ * ux500_busy_complete() - this will wait until the busy status
+ * goes off, saving any status that occurs in the meantime into
+ * host->busy_status until we know the card is not busy any more.
+ * The function returns true when the busy detection has ended
+ * and we should continue processing the command.
+ *
+ * The Ux500 typically fires two IRQs over a busy cycle like this:
+ *
+ * DAT0 busy +-----------------+
+ * | |
+ * DAT0 not busy ----+ +--------
+ *
+ * ^ ^
+ * | |
+ * IRQ1 IRQ2
+ */
+static bool ux500_busy_complete(struct mmci_host *host, struct mmc_command *cmd,
+ u32 status, u32 err_msk)
+{
+ void __iomem *base = host->base;
+ int retries = 10;
+
+ if (status & err_msk) {
+ /* Stop any ongoing busy detection if an error occurs */
+ ux500_busy_clear_mask_done(host);
+ goto out_ret_state;
+ }
+
+ /*
+ * The state transitions are encoded in a state machine crossing
+ * the edges in this switch statement.
+ */
+ switch (host->busy_state) {
+
+ /*
+ * Before unmasking for the busy end IRQ, confirm that the
+ * command was sent successfully. To keep track of having a
+ * command in-progress, waiting for busy signaling to end,
+ * store the status in host->busy_status.
+ *
+ * Note that the card may need a couple of clock cycles before
+ * it starts signaling busy on DAT0, hence re-read the
+ * MMCISTATUS register here, to allow the busy bit to be set.
+ */
+ case MMCI_BUSY_DONE:
+ /*
+ * Save the first status register read to be sure to catch
+ * all bits that may be lost while retrying. If the command
+ * is still busy this will result in assigning 0 to
+ * host->busy_status, which is what it should be in IDLE.
+ */
+ host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
+ while (retries) {
+ status = readl(base + MMCISTATUS);
+ /* Keep accumulating status bits */
+ host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
+ if (status & host->variant->busy_detect_flag) {
+ writel(readl(base + MMCIMASK0) |
+ host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+ host->busy_state = MMCI_BUSY_WAITING_FOR_START_IRQ;
+ schedule_delayed_work(&host->ux500_busy_timeout_work,
+ msecs_to_jiffies(cmd->busy_timeout));
+ goto out_ret_state;
+ }
+ retries--;
+ }
+ dev_dbg(mmc_dev(host->mmc),
+ "no busy signalling in time CMD%02x\n", cmd->opcode);
+ ux500_busy_clear_mask_done(host);
+ break;
+
+ /*
+ * If there is a command in-progress that has been successfully
+ * sent, then bail out if busy status is set and wait for the
+ * busy end IRQ.
+ *
+ * Note that the HW triggers an IRQ on both edges while
+ * monitoring DAT0 for busy completion, but there is only one
+ * status bit in MMCISTATUS for the busy state. Therefore
+ * both the start and the end interrupts need to be cleared,
+ * one after the other. So, clear the busy start IRQ here.
+ */
+ case MMCI_BUSY_WAITING_FOR_START_IRQ:
+ if (status & host->variant->busy_detect_flag) {
+ host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ host->busy_state = MMCI_BUSY_WAITING_FOR_END_IRQ;
+ } else {
+ dev_dbg(mmc_dev(host->mmc),
+ "lost busy status when waiting for busy start IRQ CMD%02x\n",
+ cmd->opcode);
+ cancel_delayed_work(&host->ux500_busy_timeout_work);
+ ux500_busy_clear_mask_done(host);
+ }
+ break;
+
+ case MMCI_BUSY_WAITING_FOR_END_IRQ:
+ if (!(status & host->variant->busy_detect_flag)) {
+ host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ cancel_delayed_work(&host->ux500_busy_timeout_work);
+ ux500_busy_clear_mask_done(host);
+ } else {
+ dev_dbg(mmc_dev(host->mmc),
+ "busy status still asserted when handling busy end IRQ - will keep waiting CMD%02x\n",
+ cmd->opcode);
+ }
+ break;
+
+ default:
+ dev_dbg(mmc_dev(host->mmc), "fell through on state %d, CMD%02x\n",
+ host->busy_state, cmd->opcode);
+ break;
+ }
+
+out_ret_state:
+ return (host->busy_state == MMCI_BUSY_DONE);
+}
+
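As an aside, not part of the patch: the switch above walks a three-state machine. A minimal standalone sketch of the same progression, with names invented here for illustration, keyed on whether the busy flag (DAT0 held low) is currently visible in MMCISTATUS:

/* Illustrative sketch only: the ux500 busy-detect state progression. */
enum busy_state { BUSY_DONE, BUSY_WAIT_START_IRQ, BUSY_WAIT_END_IRQ };

static enum busy_state busy_step(enum busy_state state, int dat0_busy)
{
	switch (state) {
	case BUSY_DONE:			/* command sent, poll for DAT0 going low */
		return dat0_busy ? BUSY_WAIT_START_IRQ : BUSY_DONE;
	case BUSY_WAIT_START_IRQ:	/* busy-start IRQ seen: clear it, wait for end */
		return dat0_busy ? BUSY_WAIT_END_IRQ : BUSY_DONE;
	case BUSY_WAIT_END_IRQ:		/* busy-end IRQ: DAT0 released, we are done */
		return dat0_busy ? BUSY_WAIT_END_IRQ : BUSY_DONE;
	}
	return BUSY_DONE;
}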
/*
* All the DMA operation mode stuff goes inside this ifdef.
* This assumes that you have a generic DMA device interface,
* no custom DMA interfaces are supported.
*/
#ifdef CONFIG_DMA_ENGINE
-static void mmci_dma_setup(struct mmci_host *host)
+struct mmci_dmae_next {
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan;
+};
+
+struct mmci_dmae_priv {
+ struct dma_chan *cur;
+ struct dma_chan *rx_channel;
+ struct dma_chan *tx_channel;
+ struct dma_async_tx_descriptor *desc_current;
+ struct mmci_dmae_next next_data;
+};
+
+int mmci_dmae_setup(struct mmci_host *host)
{
const char *rxname, *txname;
- struct variant_data *variant = host->variant;
+ struct mmci_dmae_priv *dmae;
+
+ dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
+ if (!dmae)
+ return -ENOMEM;
- host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
- host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
+ host->dma_priv = dmae;
- /* initialize pre request cookie */
- host->next_data.cookie = 1;
+ dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx");
+ if (IS_ERR(dmae->rx_channel)) {
+ int ret = PTR_ERR(dmae->rx_channel);
+ dmae->rx_channel = NULL;
+ return ret;
+ }
+
+ dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx");
+ if (IS_ERR(dmae->tx_channel)) {
+ if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER)
+ dev_warn(mmc_dev(host->mmc),
+ "Deferred probe for TX channel ignored\n");
+ dmae->tx_channel = NULL;
+ }
/*
* If only an RX channel is specified, the driver will
- * attempt to use it bidirectionally, however if it is
+ * attempt to use it bidirectionally, however if it
* is specified but cannot be located, DMA will be disabled.
*/
- if (host->dma_rx_channel && !host->dma_tx_channel)
- host->dma_tx_channel = host->dma_rx_channel;
+ if (dmae->rx_channel && !dmae->tx_channel)
+ dmae->tx_channel = dmae->rx_channel;
- if (host->dma_rx_channel)
- rxname = dma_chan_name(host->dma_rx_channel);
+ if (dmae->rx_channel)
+ rxname = dma_chan_name(dmae->rx_channel);
else
rxname = "none";
- if (host->dma_tx_channel)
- txname = dma_chan_name(host->dma_tx_channel);
+ if (dmae->tx_channel)
+ txname = dma_chan_name(dmae->tx_channel);
else
txname = "none";
@@ -470,67 +892,84 @@ static void mmci_dma_setup(struct mmci_host *host)
* Limit the maximum segment size in any SG entry according to
* the parameters of the DMA engine device.
*/
- if (host->dma_tx_channel) {
- struct device *dev = host->dma_tx_channel->device->dev;
+ if (dmae->tx_channel) {
+ struct device *dev = dmae->tx_channel->device->dev;
unsigned int max_seg_size = dma_get_max_seg_size(dev);
if (max_seg_size < host->mmc->max_seg_size)
host->mmc->max_seg_size = max_seg_size;
}
- if (host->dma_rx_channel) {
- struct device *dev = host->dma_rx_channel->device->dev;
+ if (dmae->rx_channel) {
+ struct device *dev = dmae->rx_channel->device->dev;
unsigned int max_seg_size = dma_get_max_seg_size(dev);
if (max_seg_size < host->mmc->max_seg_size)
host->mmc->max_seg_size = max_seg_size;
}
- if (variant->qcom_dml && host->dma_rx_channel && host->dma_tx_channel)
- if (dml_hw_init(host, host->mmc->parent->of_node))
- variant->qcom_dml = false;
+ if (!dmae->tx_channel || !dmae->rx_channel) {
+ mmci_dmae_release(host);
+ return -EINVAL;
+ }
+
+ return 0;
}
/*
* This is used in or so inline it
* so it can be discarded.
*/
-static inline void mmci_dma_release(struct mmci_host *host)
+void mmci_dmae_release(struct mmci_host *host)
{
- if (host->dma_rx_channel)
- dma_release_channel(host->dma_rx_channel);
- if (host->dma_tx_channel)
- dma_release_channel(host->dma_tx_channel);
- host->dma_rx_channel = host->dma_tx_channel = NULL;
-}
+ struct mmci_dmae_priv *dmae = host->dma_priv;
-static void mmci_dma_data_error(struct mmci_host *host)
-{
- dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
- dmaengine_terminate_all(host->dma_current);
- host->dma_in_progress = false;
- host->dma_current = NULL;
- host->dma_desc_current = NULL;
- host->data->host_cookie = 0;
+ if (dmae->rx_channel)
+ dma_release_channel(dmae->rx_channel);
+ if (dmae->tx_channel)
+ dma_release_channel(dmae->tx_channel);
+ dmae->rx_channel = dmae->tx_channel = NULL;
}
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
struct dma_chan *chan;
if (data->flags & MMC_DATA_READ)
- chan = host->dma_rx_channel;
+ chan = dmae->rx_channel;
else
- chan = host->dma_tx_channel;
+ chan = dmae->tx_channel;
dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
-static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+void mmci_dmae_error(struct mmci_host *host)
+{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
+
+ if (!dma_inprogress(host))
+ return;
+
+ dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+ dmaengine_terminate_all(dmae->cur);
+ host->dma_in_progress = false;
+ dmae->cur = NULL;
+ dmae->desc_current = NULL;
+ host->data->host_cookie = 0;
+
+ mmci_dma_unmap(host, host->data);
+}
+
+void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
u32 status;
int i;
+ if (!dma_inprogress(host))
+ return;
+
/* Wait up to 1ms for the DMA to complete */
for (i = 0; ; i++) {
status = readl(host->base + MMCISTATUS);
@@ -546,13 +985,12 @@ static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
* contiguous buffers. On TX, we'll get a FIFO underrun error.
*/
if (status & MCI_RXDATAAVLBLMASK) {
- mmci_dma_data_error(host);
+ mmci_dma_error(host);
if (!data->error)
data->error = -EIO;
- }
-
- if (!data->host_cookie)
+ } else if (!data->host_cookie) {
mmci_dma_unmap(host, data);
+ }
/*
* Use of DMA with scatter-gather is impossible.
@@ -564,15 +1002,16 @@ static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
}
host->dma_in_progress = false;
- host->dma_current = NULL;
- host->dma_desc_current = NULL;
+ dmae->cur = NULL;
+ dmae->desc_current = NULL;
}
/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
-static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
struct dma_chan **dma_chan,
struct dma_async_tx_descriptor **dma_desc)
{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
struct variant_data *variant = host->variant;
struct dma_slave_config conf = {
.src_addr = host->phybase + MMCIFIFO,
@@ -581,7 +1020,7 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
- .device_fc = false,
+ .device_fc = variant->dma_flow_controller,
};
struct dma_chan *chan;
struct dma_device *device;
@@ -591,10 +1030,10 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
if (data->flags & MMC_DATA_READ) {
conf.direction = DMA_DEV_TO_MEM;
- chan = host->dma_rx_channel;
+ chan = dmae->rx_channel;
} else {
conf.direction = DMA_MEM_TO_DEV;
- chan = host->dma_tx_channel;
+ chan = dmae->tx_channel;
}
/* If there's no DMA channel, fall back to PIO */
@@ -605,6 +1044,18 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
if (data->blksz * data->blocks <= variant->fifosize)
return -EINVAL;
+ /*
+ * This is necessary to get SDIO working on the Ux500. We do not yet
+ * know if this is a bug in:
+ * - The Ux500 DMA controller (DMA40)
+ * - The MMCI DMA interface on the Ux500
+ * Some power-of-two block sizes (such as 64 bytes) are sent regularly
+ * during SDIO traffic and those work fine, so for these we enable DMA
+ * transfers.
+ */
+ if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
+ return -EINVAL;
+
device = chan->device;
nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
@@ -631,160 +1082,156 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
return -ENOMEM;
}
-static inline int mmci_dma_prep_data(struct mmci_host *host,
- struct mmc_data *data)
+int mmci_dmae_prep_data(struct mmci_host *host,
+ struct mmc_data *data,
+ bool next)
{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
+ struct mmci_dmae_next *nd = &dmae->next_data;
+
+ if (!host->use_dma)
+ return -EINVAL;
+
+ if (next)
+ return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
/* Check if next job is already prepared. */
- if (host->dma_current && host->dma_desc_current)
+ if (dmae->cur && dmae->desc_current)
return 0;
/* No job were prepared thus do it now. */
- return __mmci_dma_prep_data(host, data, &host->dma_current,
- &host->dma_desc_current);
-}
-
-static inline int mmci_dma_prep_next(struct mmci_host *host,
- struct mmc_data *data)
-{
- struct mmci_host_next *nd = &host->next_data;
- return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
+ return _mmci_dmae_prep_data(host, data, &dmae->cur,
+ &dmae->desc_current);
}
-static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
int ret;
- struct mmc_data *data = host->data;
-
- ret = mmci_dma_prep_data(host, host->data);
- if (ret)
- return ret;
- /* Okay, go for it. */
- dev_vdbg(mmc_dev(host->mmc),
- "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
- data->sg_len, data->blksz, data->blocks, data->flags);
host->dma_in_progress = true;
- dmaengine_submit(host->dma_desc_current);
- dma_async_issue_pending(host->dma_current);
-
- if (host->variant->qcom_dml)
- dml_start_xfer(host, data);
-
- datactrl |= MCI_DPSM_DMAENABLE;
+ ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
+ if (ret < 0) {
+ host->dma_in_progress = false;
+ return ret;
+ }
+ dma_async_issue_pending(dmae->cur);
- /* Trigger the DMA transfer */
- mmci_write_datactrlreg(host, datactrl);
+ *datactrl |= MCI_DPSM_DMAENABLE;
- /*
- * Let the MMCI say when the data is ended and it's time
- * to fire next DMA request. When that happens, MMCI will
- * call mmci_data_end()
- */
- writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
- host->base + MMCIMASK0);
return 0;
}
-static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
-{
- struct mmci_host_next *next = &host->next_data;
-
- WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
- WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
-
- host->dma_desc_current = next->dma_desc;
- host->dma_current = next->dma_chan;
- next->dma_desc = NULL;
- next->dma_chan = NULL;
-}
-
-static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
+void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
- struct mmci_host *host = mmc_priv(mmc);
- struct mmc_data *data = mrq->data;
- struct mmci_host_next *nd = &host->next_data;
+ struct mmci_dmae_priv *dmae = host->dma_priv;
+ struct mmci_dmae_next *next = &dmae->next_data;
- if (!data)
+ if (!host->use_dma)
return;
- BUG_ON(data->host_cookie);
+ WARN_ON(!data->host_cookie && (next->desc || next->chan));
- if (mmci_validate_data(host, data))
- return;
-
- if (!mmci_dma_prep_next(host, data))
- data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
+ dmae->desc_current = next->desc;
+ dmae->cur = next->chan;
+ next->desc = NULL;
+ next->chan = NULL;
}
-static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
- int err)
+void mmci_dmae_unprep_data(struct mmci_host *host,
+ struct mmc_data *data, int err)
+
{
- struct mmci_host *host = mmc_priv(mmc);
- struct mmc_data *data = mrq->data;
+ struct mmci_dmae_priv *dmae = host->dma_priv;
- if (!data || !data->host_cookie)
+ if (!host->use_dma)
return;
mmci_dma_unmap(host, data);
if (err) {
- struct mmci_host_next *next = &host->next_data;
+ struct mmci_dmae_next *next = &dmae->next_data;
struct dma_chan *chan;
if (data->flags & MMC_DATA_READ)
- chan = host->dma_rx_channel;
+ chan = dmae->rx_channel;
else
- chan = host->dma_tx_channel;
+ chan = dmae->tx_channel;
dmaengine_terminate_all(chan);
- if (host->dma_desc_current == next->dma_desc)
- host->dma_desc_current = NULL;
+ if (dmae->desc_current == next->desc)
+ dmae->desc_current = NULL;
- if (host->dma_current == next->dma_chan) {
+ if (dmae->cur == next->chan) {
host->dma_in_progress = false;
- host->dma_current = NULL;
+ dmae->cur = NULL;
}
- next->dma_desc = NULL;
- next->dma_chan = NULL;
- data->host_cookie = 0;
+ next->desc = NULL;
+ next->chan = NULL;
}
}
+static struct mmci_host_ops mmci_variant_ops = {
+ .prep_data = mmci_dmae_prep_data,
+ .unprep_data = mmci_dmae_unprep_data,
+ .get_datactrl_cfg = mmci_get_dctrl_cfg,
+ .get_next_data = mmci_dmae_get_next_data,
+ .dma_setup = mmci_dmae_setup,
+ .dma_release = mmci_dmae_release,
+ .dma_start = mmci_dmae_start,
+ .dma_finalize = mmci_dmae_finalize,
+ .dma_error = mmci_dmae_error,
+};
#else
-/* Blank functions if the DMA engine is not available */
-static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
-{
-}
-static inline void mmci_dma_setup(struct mmci_host *host)
-{
-}
+static struct mmci_host_ops mmci_variant_ops = {
+ .get_datactrl_cfg = mmci_get_dctrl_cfg,
+};
+#endif
-static inline void mmci_dma_release(struct mmci_host *host)
+static void mmci_variant_init(struct mmci_host *host)
{
+ host->ops = &mmci_variant_ops;
}
-static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+static void ux500_variant_init(struct mmci_host *host)
{
+ host->ops = &mmci_variant_ops;
+ host->ops->busy_complete = ux500_busy_complete;
}
-static inline void mmci_dma_finalize(struct mmci_host *host,
- struct mmc_data *data)
+static void ux500v2_variant_init(struct mmci_host *host)
{
+ host->ops = &mmci_variant_ops;
+ host->ops->busy_complete = ux500_busy_complete;
+ host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
}
-static inline void mmci_dma_data_error(struct mmci_host *host)
+static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
+ struct mmci_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return;
+
+ WARN_ON(data->host_cookie);
+
+ if (mmci_validate_data(host, data))
+ return;
+
+ mmci_prep_data(host, data, true);
}
-static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
+ int err)
{
- return -ENOSYS;
-}
+ struct mmci_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
-#define mmci_pre_request NULL
-#define mmci_post_request NULL
+ if (!data || !data->host_cookie)
+ return;
-#endif
+ mmci_unprep_data(host, data, err);
+}
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
@@ -792,7 +1239,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
unsigned int datactrl, timeout, irqmask;
unsigned long long clks;
void __iomem *base;
- int blksz_bits;
dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
data->blksz, data->blocks, data->flags);
@@ -810,18 +1256,8 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
writel(timeout, base + MMCIDATATIMER);
writel(host->size, base + MMCIDATALENGTH);
- blksz_bits = ffs(data->blksz) - 1;
- BUG_ON(1 << blksz_bits != data->blksz);
-
- if (variant->blksz_datactrl16)
- datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
- else if (variant->blksz_datactrl4)
- datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
- else
- datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
-
- if (data->flags & MMC_DATA_READ)
- datactrl |= MCI_DPSM_DIRECTION;
+ datactrl = host->ops->get_datactrl_cfg(host);
+ datactrl |= host->data->flags & MMC_DATA_READ ? MCI_DPSM_DIRECTION : 0;
if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
u32 clk;
@@ -852,7 +1288,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
* Attempt to use DMA operation mode, if this
* should fail, fall back to PIO mode
*/
- if (!mmci_dma_start_data(host, datactrl))
+ if (!mmci_dma_start(host, datactrl))
return;
/* IRQ mode, map the SG list for CPU reading/writing */
@@ -885,21 +1321,51 @@ static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
void __iomem *base = host->base;
+ bool busy_resp = cmd->flags & MMC_RSP_BUSY;
+ unsigned long long clks;
dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
cmd->opcode, cmd->arg, cmd->flags);
- if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
+ if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
writel(0, base + MMCICOMMAND);
mmci_reg_delay(host);
}
- c |= cmd->opcode | MCI_CPSM_ENABLE;
+ if (host->variant->cmdreg_stop &&
+ cmd->opcode == MMC_STOP_TRANSMISSION)
+ c |= host->variant->cmdreg_stop;
+
+ c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136)
- c |= MCI_CPSM_LONGRSP;
- c |= MCI_CPSM_RESPONSE;
+ c |= host->variant->cmdreg_lrsp_crc;
+ else if (cmd->flags & MMC_RSP_CRC)
+ c |= host->variant->cmdreg_srsp_crc;
+ else
+ c |= host->variant->cmdreg_srsp;
+ }
+
+ host->busy_status = 0;
+ host->busy_state = MMCI_BUSY_DONE;
+
+ /* Assign a default timeout if the core does not provide one */
+ if (busy_resp && !cmd->busy_timeout)
+ cmd->busy_timeout = 10 * MSEC_PER_SEC;
+
+ if (busy_resp && host->variant->busy_timeout) {
+ if (cmd->busy_timeout > host->mmc->max_busy_timeout)
+ clks = (unsigned long long)host->mmc->max_busy_timeout * host->cclk;
+ else
+ clks = (unsigned long long)cmd->busy_timeout * host->cclk;
+
+ do_div(clks, MSEC_PER_SEC);
+ writel_relaxed(clks, host->base + MMCIDATATIMER);
}
+
+ if (host->ops->pre_sig_volt_switch && cmd->opcode == SD_SWITCH_VOLTAGE)
+ host->ops->pre_sig_volt_switch(host);
+
if (/*interrupt*/0)
c |= MCI_CPSM_INTERRUPT;
@@ -912,24 +1378,32 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
writel(c, base + MMCICOMMAND);
}
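For illustration only, not from the patch: the MMCIDATATIMER value written in the hunk above is just the busy timeout (after the cap against mmc->max_busy_timeout) converted from milliseconds to card-clock ticks. A standalone sketch of the arithmetic, using an assumed 100 MHz cclk for the worked numbers:

/*
 * Illustrative sketch only: convert a busy timeout in milliseconds into
 * card-clock ticks, e.g. 10 ms at 100 MHz -> 1000000 ticks.
 */
#include <stdint.h>

static uint32_t busy_timeout_clks(uint32_t timeout_ms, uint32_t cclk_hz)
{
	uint64_t clks = (uint64_t)timeout_ms * cclk_hz;

	return (uint32_t)(clks / 1000);	/* mirrors do_div(clks, MSEC_PER_SEC) */
}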
+static void mmci_stop_command(struct mmci_host *host)
+{
+ host->stop_abort.error = 0;
+ mmci_start_command(host, &host->stop_abort, 0);
+}
+
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
unsigned int status)
{
+ unsigned int status_err;
+
/* Make sure we have data to handle */
if (!data)
return;
/* First check for errors */
- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
- MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ status_err = status & (host->variant->start_err |
+ MCI_DATACRCFAIL | MCI_DATATIMEOUT |
+ MCI_TXUNDERRUN | MCI_RXOVERRUN);
+
+ if (status_err) {
u32 remain, success;
/* Terminate the DMA transfer */
- if (dma_inprogress(host)) {
- mmci_dma_data_error(host);
- mmci_dma_unmap(host, data);
- }
+ mmci_dma_error(host);
/*
* Calculate how far we are into the transfer. Note that
@@ -938,22 +1412,26 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
* can be as much as a FIFO-worth of data ahead. This
* matters for FIFO overruns only.
*/
- remain = readl(host->base + MMCIDATACNT);
- success = data->blksz * data->blocks - remain;
+ if (!host->variant->datacnt_useless) {
+ remain = readl(host->base + MMCIDATACNT);
+ success = data->blksz * data->blocks - remain;
+ } else {
+ success = 0;
+ }
dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
- status, success);
- if (status & MCI_DATACRCFAIL) {
+ status_err, success);
+ if (status_err & MCI_DATACRCFAIL) {
/* Last block was not successful */
success -= 1;
data->error = -EILSEQ;
- } else if (status & MCI_DATATIMEOUT) {
+ } else if (status_err & MCI_DATATIMEOUT) {
data->error = -ETIMEDOUT;
- } else if (status & MCI_STARTBITERR) {
+ } else if (status_err & MCI_STARTBITERR) {
data->error = -ECOMM;
- } else if (status & MCI_TXUNDERRUN) {
+ } else if (status_err & MCI_TXUNDERRUN) {
data->error = -EIO;
- } else if (status & MCI_RXOVERRUN) {
+ } else if (status_err & MCI_RXOVERRUN) {
if (success > host->variant->fifosize)
success -= host->variant->fifosize;
else
@@ -967,15 +1445,20 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
if (status & MCI_DATAEND || data->error) {
- if (dma_inprogress(host))
- mmci_dma_finalize(host, data);
+ mmci_dma_finalize(host, data);
+
mmci_stop_data(host);
if (!data->error)
/* The error clause is handled above, success! */
data->bytes_xfered = data->blksz * data->blocks;
- if (!data->stop || host->mrq->sbc) {
+ if (!data->stop) {
+ if (host->variant->cmdreg_stop && data->error)
+ mmci_stop_command(host);
+ else
+ mmci_request_end(host, data->mrq);
+ } else if (host->mrq->sbc && !data->error) {
mmci_request_end(host, data->mrq);
} else {
mmci_start_command(host, data->stop, 0);
@@ -987,77 +1470,32 @@ static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
unsigned int status)
{
+ u32 err_msk = MCI_CMDCRCFAIL | MCI_CMDTIMEOUT;
void __iomem *base = host->base;
- bool sbc;
+ bool sbc, busy_resp;
if (!cmd)
return;
sbc = (cmd == host->mrq->sbc);
+ busy_resp = !!(cmd->flags & MMC_RSP_BUSY);
/*
* We need to be one of these interrupts to be considered worth
* handling. Note that we tag on any latent IRQs postponed
* due to waiting for busy status.
*/
- if (!((status|host->busy_status) &
- (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
- return;
-
- /*
- * ST Micro variant: handle busy detection.
- */
- if (host->variant->busy_detect) {
- bool busy_resp = !!(cmd->flags & MMC_RSP_BUSY);
+ if (host->variant->busy_timeout && busy_resp)
+ err_msk |= MCI_DATATIMEOUT;
- /* We are busy with a command, return */
- if (host->busy_status &&
- (status & host->variant->busy_detect_flag))
- return;
-
- /*
- * We were not busy, but we now got a busy response on
- * something that was not an error, and we double-check
- * that the special busy status bit is still set before
- * proceeding.
- */
- if (!host->busy_status && busy_resp &&
- !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
- (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
-
- /* Clear the busy start IRQ */
- writel(host->variant->busy_detect_mask,
- host->base + MMCICLEAR);
+ if (!((status | host->busy_status) &
+ (err_msk | MCI_CMDSENT | MCI_CMDRESPEND)))
+ return;
- /* Unmask the busy end IRQ */
- writel(readl(base + MMCIMASK0) |
- host->variant->busy_detect_mask,
- base + MMCIMASK0);
- /*
- * Now cache the last response status code (until
- * the busy bit goes low), and return.
- */
- host->busy_status =
- status & (MCI_CMDSENT|MCI_CMDRESPEND);
+ /* Handle busy detection on DAT0 if the variant supports it. */
+ if (busy_resp && host->variant->busy_detect)
+ if (!host->ops->busy_complete(host, cmd, status, err_msk))
return;
- }
-
- /*
- * At this point we are not busy with a command, we have
- * not received a new busy request, clear and mask the busy
- * end IRQ and fall through to process the IRQ.
- */
- if (host->busy_status) {
-
- writel(host->variant->busy_detect_mask,
- host->base + MMCICLEAR);
-
- writel(readl(base + MMCIMASK0) &
- ~host->variant->busy_detect_mask,
- base + MMCIMASK0);
- host->busy_status = 0;
- }
- }
host->cmd = NULL;
@@ -1065,6 +1503,14 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
cmd->error = -ETIMEDOUT;
} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
cmd->error = -EILSEQ;
+ } else if (host->variant->busy_timeout && busy_resp &&
+ status & MCI_DATATIMEOUT) {
+ cmd->error = -ETIMEDOUT;
+ /*
+ * This will wake up mmci_irq_thread() which will issue
+ * a hardware reset of the MMCI block.
+ */
+ host->irq_action = IRQ_WAKE_THREAD;
} else {
cmd->resp[0] = readl(base + MMCIRESPONSE0);
cmd->resp[1] = readl(base + MMCIRESPONSE1);
@@ -1075,20 +1521,74 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
if ((!sbc && !cmd->data) || cmd->error) {
if (host->data) {
/* Terminate the DMA transfer */
- if (dma_inprogress(host)) {
- mmci_dma_data_error(host);
- mmci_dma_unmap(host, host->data);
- }
+ mmci_dma_error(host);
+
mmci_stop_data(host);
+ if (host->variant->cmdreg_stop && cmd->error) {
+ mmci_stop_command(host);
+ return;
+ }
}
- mmci_request_end(host, host->mrq);
+
+ if (host->irq_action != IRQ_WAKE_THREAD)
+ mmci_request_end(host, host->mrq);
+
} else if (sbc) {
mmci_start_command(host, host->mrq->cmd, 0);
- } else if (!(cmd->data->flags & MMC_DATA_READ)) {
+ } else if (!host->variant->datactrl_first &&
+ !(cmd->data->flags & MMC_DATA_READ)) {
mmci_start_data(host, cmd->data);
}
}
+static char *ux500_state_str(struct mmci_host *host)
+{
+ switch (host->busy_state) {
+ case MMCI_BUSY_WAITING_FOR_START_IRQ:
+ return "waiting for start IRQ";
+ case MMCI_BUSY_WAITING_FOR_END_IRQ:
+ return "waiting for end IRQ";
+ case MMCI_BUSY_DONE:
+ return "not waiting for IRQs";
+ default:
+ return "unknown";
+ }
+}
+
+/*
+ * This busy timeout worker is used to "kick" the command IRQ if a
+ * busy detect IRQ fails to appear in reasonable time. Only used on
+ * variants with busy detection IRQ delivery.
+ */
+static void ux500_busy_timeout_work(struct work_struct *work)
+{
+ struct mmci_host *host = container_of(work, struct mmci_host,
+ ux500_busy_timeout_work.work);
+ unsigned long flags;
+ u32 status;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->cmd) {
+ /* If we are still busy let's tag on a cmd-timeout error. */
+ status = readl(host->base + MMCISTATUS);
+ if (status & host->variant->busy_detect_flag) {
+ status |= MCI_CMDTIMEOUT;
+ dev_err(mmc_dev(host->mmc),
+ "timeout in state %s still busy with CMD%02x\n",
+ ux500_state_str(host), host->cmd->opcode);
+ } else {
+ dev_err(mmc_dev(host->mmc),
+ "timeout in state %s waiting for busy CMD%02x\n",
+ ux500_state_str(host), host->cmd->opcode);
+ }
+
+ mmci_cmd_irq(host, host->cmd, status);
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
return remain - (readl(host->base + MMCIFIFOCNT) << 2);
@@ -1200,15 +1700,12 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
struct sg_mapping_iter *sg_miter = &host->sg_miter;
struct variant_data *variant = host->variant;
void __iomem *base = host->base;
- unsigned long flags;
u32 status;
status = readl(base + MMCISTATUS);
dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
- local_irq_save(flags);
-
do {
unsigned int remain, len;
char *buffer;
@@ -1248,8 +1745,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
sg_miter_stop(sg_miter);
- local_irq_restore(flags);
-
/*
* If we have less than the fifo 'half-full' threshold to transfer,
* trigger a PIO interrupt as soon as any data is available.
@@ -1271,6 +1766,25 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void mmci_write_sdio_irq_bit(struct mmci_host *host, int enable)
+{
+ void __iomem *base = host->base;
+ u32 mask = readl_relaxed(base + MMCIMASK0);
+
+ if (enable)
+ writel_relaxed(mask | MCI_ST_SDIOITMASK, base + MMCIMASK0);
+ else
+ writel_relaxed(mask & ~MCI_ST_SDIOITMASK, base + MMCIMASK0);
+}
+
+static void mmci_signal_sdio_irq(struct mmci_host *host, u32 status)
+{
+ if (status & MCI_ST_SDIOIT) {
+ mmci_write_sdio_irq_bit(host, 0);
+ sdio_signal_irq(host->mmc);
+ }
+}
+
/*
* Handle completion of command and data transfers.
*/
@@ -1278,29 +1792,25 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
{
struct mmci_host *host = dev_id;
u32 status;
- int ret = 0;
spin_lock(&host->lock);
+ host->irq_action = IRQ_HANDLED;
do {
status = readl(host->base + MMCISTATUS);
+ if (!status)
+ break;
if (host->singleirq) {
- if (status & readl(host->base + MMCIMASK1))
+ if (status & host->mask1_reg)
mmci_pio_irq(irq, dev_id);
- status &= ~MCI_IRQ1MASK;
+ status &= ~host->variant->irq_pio_mask;
}
/*
- * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's
- * enabled) in mmci_cmd_irq() function where ST Micro busy
- * detection variant is handled. Considering the HW seems to be
- * triggering the IRQ on both edges while monitoring DAT0 for
- * busy completion and that same status bit is used to monitor
- * start and end of busy detection, special care must be taken
- * to make sure that both start and end interrupts are always
- * cleared one after the other.
+ * Busy detection is managed by mmci_cmd_irq(), including
+ * clearing the corresponding IRQ.
*/
status &= readl(host->base + MMCIMASK0);
if (host->variant->busy_detect)
@@ -1319,18 +1829,51 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
mmci_data_irq(host, host->data, status);
}
+ if (host->variant->supports_sdio_irq)
+ mmci_signal_sdio_irq(host, status);
+
/*
- * Don't poll for busy completion in irq context.
+ * Busy detection has been handled by mmci_cmd_irq() above.
+ * Clear the status bit to prevent polling in IRQ context.
*/
- if (host->variant->busy_detect && host->busy_status)
+ if (host->variant->busy_detect_flag)
status &= ~host->variant->busy_detect_flag;
- ret = 1;
} while (status);
spin_unlock(&host->lock);
- return IRQ_RETVAL(ret);
+ return host->irq_action;
+}
+
+/*
+ * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW.
+ *
+ * A reset is needed for some variants, where a datatimeout for an R1B request
+ * causes the DPSM to stay busy (non-functional).
+ */
+static irqreturn_t mmci_irq_thread(int irq, void *dev_id)
+{
+ struct mmci_host *host = dev_id;
+ unsigned long flags;
+
+ if (host->rst) {
+ reset_control_assert(host->rst);
+ udelay(2);
+ reset_control_deassert(host->rst);
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+ writel(host->clk_reg, host->base + MMCICLOCK);
+ writel(host->pwr_reg, host->base + MMCIPOWER);
+ writel(MCI_IRQENABLE | host->variant->start_err,
+ host->base + MMCIMASK0);
+
+ host->irq_action = IRQ_HANDLED;
+ mmci_request_end(host, host->mrq);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return host->irq_action;
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -1353,7 +1896,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
if (mrq->data)
mmci_get_next_data(host, mrq->data);
- if (mrq->data && mrq->data->flags & MMC_DATA_READ)
+ if (mrq->data &&
+ (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
mmci_start_data(host, mrq->data);
if (mrq->sbc)
@@ -1364,6 +1908,21 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock_irqrestore(&host->lock, flags);
}
+static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ u32 max_busy_timeout = 0;
+
+ if (!host->variant->busy_detect)
+ return;
+
+ if (host->variant->busy_timeout && mmc->actual_clock)
+ max_busy_timeout = U32_MAX / DIV_ROUND_UP(mmc->actual_clock,
+ MSEC_PER_SEC);
+
+ mmc->max_busy_timeout = max_busy_timeout;
+}
+
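Again for illustration only, not part of the patch: the ceiling computed above keeps the converted tick count within the 32-bit MMCIDATATIMER. For an assumed actual clock of 100 MHz, the host reports UINT32_MAX / 100000 = 42949 ms, roughly 43 seconds. A standalone sketch:

/*
 * Illustrative sketch only: bound the millisecond timeout so that
 * ms * DIV_ROUND_UP(cclk, 1000) still fits in a 32-bit register.
 */
#include <stdint.h>

static uint32_t max_busy_timeout_ms(uint32_t actual_clock_hz)
{
	uint32_t clks_per_ms = (actual_clock_hz + 999) / 1000;	/* DIV_ROUND_UP */

	return UINT32_MAX / clks_per_ms;
}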
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mmci_host *host = mmc_priv(mmc);
@@ -1372,10 +1931,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
unsigned long flags;
int ret;
- if (host->plat->ios_handler &&
- host->plat->ios_handler(mmc_dev(mmc), ios))
- dev_err(mmc_dev(mmc), "platform ios_handler failed\n");
-
switch (ios->power_mode) {
case MMC_POWER_OFF:
if (!IS_ERR(mmc->supply.vmmc))
@@ -1429,16 +1984,18 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
~MCI_ST_DATA2DIREN);
}
- if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
- if (host->hw_designer != AMBA_VENDOR_ST)
- pwr |= MCI_ROD;
- else {
- /*
- * The ST Micro variant use the ROD bit for something
- * else and only has OD (Open Drain).
- */
- pwr |= MCI_OD;
- }
+ if (variant->opendrain) {
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ pwr |= variant->opendrain;
+ } else {
+ /*
+ * If the variant cannot configure the pads on its own, then we
+ * expect the pinctrl to be able to do that for us.
+ */
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ pinctrl_select_state(host->pinctrl, host->pins_opendrain);
+ else
+ pinctrl_select_default_state(mmc_dev(mmc));
}
/*
@@ -1461,8 +2018,18 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_lock_irqsave(&host->lock, flags);
- mmci_set_clkreg(host, ios->clock);
- mmci_write_pwrreg(host, pwr);
+ if (host->ops && host->ops->set_clkreg)
+ host->ops->set_clkreg(host, ios->clock);
+ else
+ mmci_set_clkreg(host, ios->clock);
+
+ mmci_set_max_busy_timeout(mmc);
+
+ if (host->ops && host->ops->set_pwrreg)
+ host->ops->set_pwrreg(host, pwr);
+ else
+ mmci_write_pwrreg(host, pwr);
+
mmci_reg_delay(host);
spin_unlock_irqrestore(&host->lock, flags);
@@ -1485,30 +2052,48 @@ static int mmci_get_cd(struct mmc_host *mmc)
static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
- int ret = 0;
+ struct mmci_host *host = mmc_priv(mmc);
+ int ret;
+
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
+
+ if (!ret && host->ops && host->ops->post_sig_volt_switch)
+ ret = host->ops->post_sig_volt_switch(host, ios);
+ else if (ret)
+ ret = 0;
+
+ if (ret < 0)
+ dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
+
+ return ret;
+}
+
+static void mmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ unsigned long flags;
- if (!IS_ERR(mmc->supply.vqmmc)) {
+ if (enable)
+ /* Keep the SDIO mode bit if SDIO irqs are enabled */
+ pm_runtime_get_sync(mmc_dev(mmc));
- switch (ios->signal_voltage) {
- case MMC_SIGNAL_VOLTAGE_330:
- ret = regulator_set_voltage(mmc->supply.vqmmc,
- 2700000, 3600000);
- break;
- case MMC_SIGNAL_VOLTAGE_180:
- ret = regulator_set_voltage(mmc->supply.vqmmc,
- 1700000, 1950000);
- break;
- case MMC_SIGNAL_VOLTAGE_120:
- ret = regulator_set_voltage(mmc->supply.vqmmc,
- 1100000, 1300000);
- break;
- }
+ spin_lock_irqsave(&host->lock, flags);
+ mmci_write_sdio_irq_bit(host, enable);
+ spin_unlock_irqrestore(&host->lock, flags);
- if (ret)
- dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
+ if (!enable) {
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
}
+}
- return ret;
+static void mmci_ack_sdio_irq(struct mmc_host *mmc)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ mmci_write_sdio_irq_bit(host, 1);
+ spin_unlock_irqrestore(&host->lock, flags);
}
static struct mmc_host_ops mmci_ops = {
@@ -1521,6 +2106,65 @@ static struct mmc_host_ops mmci_ops = {
.start_signal_voltage_switch = mmci_sig_volt_switch,
};
+static void mmci_probe_level_translator(struct mmc_host *mmc)
+{
+ struct device *dev = mmc_dev(mmc);
+ struct mmci_host *host = mmc_priv(mmc);
+ struct gpio_desc *cmd_gpio;
+ struct gpio_desc *ck_gpio;
+ struct gpio_desc *ckin_gpio;
+ int clk_hi, clk_lo;
+
+ /*
+ * Assume the level translator is present if st,use-ckin is set.
+ * This is to cater for DTs which do not implement this test.
+ */
+ host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;
+
+ cmd_gpio = gpiod_get(dev, "st,cmd", GPIOD_OUT_HIGH);
+ if (IS_ERR(cmd_gpio))
+ goto exit_cmd;
+
+ ck_gpio = gpiod_get(dev, "st,ck", GPIOD_OUT_HIGH);
+ if (IS_ERR(ck_gpio))
+ goto exit_ck;
+
+ ckin_gpio = gpiod_get(dev, "st,ckin", GPIOD_IN);
+ if (IS_ERR(ckin_gpio))
+ goto exit_ckin;
+
+ /* All GPIOs are valid, test whether level translator works */
+
+ /* Sample CKIN */
+ clk_hi = !!gpiod_get_value(ckin_gpio);
+
+ /* Set CK low */
+ gpiod_set_value(ck_gpio, 0);
+
+ /* Sample CKIN */
+ clk_lo = !!gpiod_get_value(ckin_gpio);
+
+ /* Tristate all */
+ gpiod_direction_input(cmd_gpio);
+ gpiod_direction_input(ck_gpio);
+
+ /* Level translator is present if CK signal is propagated to CKIN */
+ if (!clk_hi || clk_lo) {
+ host->clk_reg_add &= ~MCI_STM32_CLK_SELCKIN;
+ dev_warn(dev,
+ "Level translator inoperable, CK signal not detected on CKIN, disabling.\n");
+ }
+
+ gpiod_put(ckin_gpio);
+
+exit_ckin:
+ gpiod_put(ck_gpio);
+exit_ck:
+ gpiod_put(cmd_gpio);
+exit_cmd:
+ pinctrl_select_default_state(dev);
+}
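/*
 * The decision above reduces to a two-sample check (the helper below is an
 * illustrative sketch, not part of the driver): the translator is trusted
 * only if CKIN reads high while CK is driven high and reads low after CK is
 * driven low, i.e. CK is actually propagated back to CKIN.
 */
static inline int level_translator_present(int clk_hi, int clk_lo)
{
	/* mirrors the "if (!clk_hi || clk_lo)" disable condition above */
	return clk_hi && !clk_lo;
}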
+
static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
struct mmci_host *host = mmc_priv(mmc);
@@ -1529,22 +2173,28 @@ static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
if (ret)
return ret;
- if (of_get_property(np, "st,sig-dir-dat0", NULL))
+ if (of_property_read_bool(np, "st,sig-dir-dat0"))
host->pwr_reg_add |= MCI_ST_DATA0DIREN;
- if (of_get_property(np, "st,sig-dir-dat2", NULL))
+ if (of_property_read_bool(np, "st,sig-dir-dat2"))
host->pwr_reg_add |= MCI_ST_DATA2DIREN;
- if (of_get_property(np, "st,sig-dir-dat31", NULL))
+ if (of_property_read_bool(np, "st,sig-dir-dat31"))
host->pwr_reg_add |= MCI_ST_DATA31DIREN;
- if (of_get_property(np, "st,sig-dir-dat74", NULL))
+ if (of_property_read_bool(np, "st,sig-dir-dat74"))
host->pwr_reg_add |= MCI_ST_DATA74DIREN;
- if (of_get_property(np, "st,sig-dir-cmd", NULL))
+ if (of_property_read_bool(np, "st,sig-dir-cmd"))
host->pwr_reg_add |= MCI_ST_CMDDIREN;
- if (of_get_property(np, "st,sig-pin-fbclk", NULL))
+ if (of_property_read_bool(np, "st,sig-pin-fbclk"))
host->pwr_reg_add |= MCI_ST_FBCLKEN;
-
- if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
+ if (of_property_read_bool(np, "st,sig-dir"))
+ host->pwr_reg_add |= MCI_STM32_DIRPOL;
+ if (of_property_read_bool(np, "st,neg-edge"))
+ host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
+ if (of_property_read_bool(np, "st,use-ckin"))
+ mmci_probe_level_translator(mmc);
+
+ if (of_property_read_bool(np, "mmc-cap-mmc-highspeed"))
mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
- if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
+ if (of_property_read_bool(np, "mmc-cap-sd-highspeed"))
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
return 0;
@@ -1572,16 +2222,35 @@ static int mmci_probe(struct amba_device *dev,
return -ENOMEM;
}
- mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
+ mmc = devm_mmc_alloc_host(&dev->dev, sizeof(*host));
if (!mmc)
return -ENOMEM;
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->mmc_ops = &mmci_ops;
+ mmc->ops = &mmci_ops;
+
ret = mmci_of_parse(np, mmc);
if (ret)
- goto host_free;
+ return ret;
- host = mmc_priv(mmc);
- host->mmc = mmc;
+ /*
+ * Some variants (STM32) don't have an opendrain bit; nevertheless,
+ * the pins can be set accordingly using pinctrl
+ */
+ if (!variant->opendrain) {
+ host->pinctrl = devm_pinctrl_get(&dev->dev);
+ if (IS_ERR(host->pinctrl))
+ return dev_err_probe(&dev->dev, PTR_ERR(host->pinctrl),
+ "failed to get pinctrl\n");
+
+ host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
+ MMCI_PINCTRL_STATE_OPENDRAIN);
+ if (IS_ERR(host->pins_opendrain))
+ return dev_err_probe(&dev->dev, PTR_ERR(host->pins_opendrain),
+ "Can't select opendrain pins\n");
+ }
host->hw_designer = amba_manf(dev);
host->hw_revision = amba_rev(dev);
@@ -1589,14 +2258,12 @@ static int mmci_probe(struct amba_device *dev,
dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);
host->clk = devm_clk_get(&dev->dev, NULL);
- if (IS_ERR(host->clk)) {
- ret = PTR_ERR(host->clk);
- goto host_free;
- }
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
ret = clk_prepare_enable(host->clk);
if (ret)
- goto host_free;
+ return ret;
if (variant->qcom_fifo)
host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
@@ -1627,6 +2294,9 @@ static int mmci_probe(struct amba_device *dev,
goto clk_disable;
}
+ if (variant->init)
+ variant->init(host);
+
/*
* The ARM and ST versions of the block have slightly different
* clock divider equations which means that the minimum divider
@@ -1635,6 +2305,8 @@ static int mmci_probe(struct amba_device *dev,
*/
if (variant->st_clkdiv)
mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
+ else if (variant->stm32_clkdiv)
+ mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
else if (variant->explicit_mclk_control)
mmc->f_min = clk_round_rate(host->clk, 100000);
else
@@ -1656,9 +2328,18 @@ static int mmci_probe(struct amba_device *dev,
dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
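/*
 * A standalone sketch of where the 257 and 2046 divisors above come from,
 * assuming the ST divider gives f = mclk / (div + 2) with an 8-bit divider
 * (max 255) and the STM32 SDMMC divider gives f = mclk / (2 * div) with a
 * 10-bit divider (max 1023). The 100 MHz input clock is an assumed example.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long mclk = 100000000;	/* assumed 100 MHz input clock */

	/* ST variant: largest divider 255 -> f_min = mclk / (255 + 2) */
	printf("ST    f_min = %lu Hz\n", DIV_ROUND_UP(mclk, 257UL));

	/* STM32 SDMMC: largest divider 1023 -> f_min = mclk / (2 * 1023) */
	printf("STM32 f_min = %lu Hz\n", DIV_ROUND_UP(mclk, 2046UL));
	return 0;
}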
+ host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
+ if (IS_ERR(host->rst)) {
+ ret = PTR_ERR(host->rst);
+ goto clk_disable;
+ }
+ ret = reset_control_deassert(host->rst);
+ if (ret)
+ dev_err(mmc_dev(mmc), "failed to de-assert reset\n");
+
/* Get regulators and the supported OCR mask */
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto clk_disable;
if (!mmc->ocr_avail)
@@ -1666,13 +2347,6 @@ static int mmci_probe(struct amba_device *dev,
else if (plat->ocr_mask)
dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
- /* DT takes precedence over platform data. */
- if (!np) {
- if (!plat->cd_invert)
- mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
- mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
- }
-
/* We support these capabilities. */
mmc->caps |= MMC_CAP_CMD23;
@@ -1689,10 +2363,26 @@ static int mmci_probe(struct amba_device *dev,
mmci_write_datactrlreg(host,
host->variant->busy_dpsm_flag);
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
- mmc->max_busy_timeout = 0;
}
- mmc->ops = &mmci_ops;
+ if (variant->supports_sdio_irq && host->mmc->caps & MMC_CAP_SDIO_IRQ) {
+ mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+
+ mmci_ops.enable_sdio_irq = mmci_enable_sdio_irq;
+ mmci_ops.ack_sdio_irq = mmci_ack_sdio_irq;
+
+ mmci_write_datactrlreg(host,
+ host->variant->datactrl_mask_sdio);
+ }
+
+ /* Variants with a mandatory busy timeout in HW need R1B responses. */
+ if (variant->busy_timeout)
+ mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+
+ /* Prepare a CMD12 - needed to clear the DPSM on some variants. */
+ host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
+ host->stop_abort.arg = 0;
+ host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;
/* We support these PM capabilities. */
mmc->pm_caps |= MMC_PM_KEEP_POWER;
@@ -1718,18 +2408,21 @@ static int mmci_probe(struct amba_device *dev,
/*
* Block size can be up to 2^datactrl_blocksz bytes (2048 on most variants),
* but must be a power of two.
*/
- mmc->max_blk_size = 1 << 11;
+ mmc->max_blk_size = 1 << variant->datactrl_blocksz;
/*
* Limit the number of blocks transferred so that we don't overflow
* the maximum request size.
*/
- mmc->max_blk_count = mmc->max_req_size >> 11;
+ mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;
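/*
 * A standalone sketch of the block-size/block-count bookkeeping above: with
 * an assumed datactrl_blocksz of 11 (a 2048-byte maximum block) and an
 * assumed max_req_size of 1 MiB, shifting by the same field width guarantees
 * that max_blk_count full-size blocks never exceed the request size. Both
 * numbers here are illustrative assumptions, not values from the driver.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int datactrl_blocksz = 11;		/* assumed field width */
	unsigned int max_req_size = 1024 * 1024;	/* assumed 1 MiB */
	unsigned int max_blk_size = 1u << datactrl_blocksz;
	unsigned int max_blk_count = max_req_size >> datactrl_blocksz;

	assert(max_blk_count * max_blk_size <= max_req_size);
	printf("max_blk_size=%u max_blk_count=%u\n", max_blk_size, max_blk_count);
	return 0;
}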
spin_lock_init(&host->lock);
writel(0, host->base + MMCIMASK0);
- writel(0, host->base + MMCIMASK1);
+
+ if (variant->mmcimask1)
+ writel(0, host->base + MMCIMASK1);
+
writel(0xfff, host->base + MMCICLEAR);
/*
@@ -1737,34 +2430,21 @@ static int mmci_probe(struct amba_device *dev,
* - not using DT but using a descriptor table, or
* - using a table of descriptors ALONGSIDE DT, or
* look up these descriptors named "cd" and "wp" right here, fail
- * silently of these do not exist and proceed to try platform data
+ * silently if these do not exist
*/
if (!np) {
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
- if (ret < 0) {
- if (ret == -EPROBE_DEFER)
- goto clk_disable;
- else if (gpio_is_valid(plat->gpio_cd)) {
- ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
- if (ret)
- goto clk_disable;
- }
- }
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
+ if (ret == -EPROBE_DEFER)
+ goto clk_disable;
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
- if (ret < 0) {
- if (ret == -EPROBE_DEFER)
- goto clk_disable;
- else if (gpio_is_valid(plat->gpio_wp)) {
- ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
- if (ret)
- goto clk_disable;
- }
- }
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
+ if (ret == -EPROBE_DEFER)
+ goto clk_disable;
}
- ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
- DRIVER_NAME " (cmd)", host);
+ ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
+ mmci_irq_thread, IRQF_SHARED,
+ DRIVER_NAME " (cmd)", host);
if (ret)
goto clk_disable;
@@ -1777,7 +2457,11 @@ static int mmci_probe(struct amba_device *dev,
goto clk_disable;
}
- writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ if (host->variant->busy_detect)
+ INIT_DELAYED_WORK(&host->ux500_busy_timeout_work,
+ ux500_busy_timeout_work);
+
+ writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);
amba_set_drvdata(dev, mmc);
@@ -1791,24 +2475,25 @@ static int mmci_probe(struct amba_device *dev,
pm_runtime_set_autosuspend_delay(&dev->dev, 50);
pm_runtime_use_autosuspend(&dev->dev);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto clk_disable;
pm_runtime_put(&dev->dev);
return 0;
clk_disable:
clk_disable_unprepare(host->clk);
- host_free:
- mmc_free_host(mmc);
return ret;
}
-static int mmci_remove(struct amba_device *dev)
+static void mmci_remove(struct amba_device *dev)
{
struct mmc_host *mmc = amba_get_drvdata(dev);
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
+ struct variant_data *variant = host->variant;
/*
* Undo pm_runtime_put() in probe. We use the _sync
@@ -1819,20 +2504,18 @@ static int mmci_remove(struct amba_device *dev)
mmc_remove_host(mmc);
writel(0, host->base + MMCIMASK0);
- writel(0, host->base + MMCIMASK1);
+
+ if (variant->mmcimask1)
+ writel(0, host->base + MMCIMASK1);
writel(0, host->base + MMCICOMMAND);
writel(0, host->base + MMCIDATACTRL);
mmci_dma_release(host);
clk_disable_unprepare(host->clk);
- mmc_free_host(mmc);
}
-
- return 0;
}
-#ifdef CONFIG_PM
static void mmci_save(struct mmci_host *host)
{
unsigned long flags;
@@ -1861,7 +2544,8 @@ static void mmci_restore(struct mmci_host *host)
writel(host->datactrl_reg, host->base + MMCIDATACTRL);
writel(host->pwr_reg, host->base + MMCIPOWER);
}
- writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ writel(MCI_IRQENABLE | host->variant->start_err,
+ host->base + MMCIMASK0);
mmci_reg_delay(host);
spin_unlock_irqrestore(&host->lock, flags);
@@ -1891,20 +2575,18 @@ static int mmci_runtime_resume(struct device *dev)
struct mmci_host *host = mmc_priv(mmc);
clk_prepare_enable(host->clk);
mmci_restore(host);
- pinctrl_pm_select_default_state(dev);
+ pinctrl_select_default_state(dev);
}
return 0;
}
-#endif
static const struct dev_pm_ops mmci_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};
-static struct amba_id mmci_ids[] = {
+static const struct amba_id mmci_ids[] = {
{
.id = 0x00041180,
.mask = 0xff0fffff,
@@ -1951,6 +2633,31 @@ static struct amba_id mmci_ids[] = {
.mask = 0xf0ffffff,
.data = &variant_ux500v2,
},
+ {
+ .id = 0x00880180,
+ .mask = 0x00ffffff,
+ .data = &variant_stm32,
+ },
+ {
+ .id = 0x10153180,
+ .mask = 0xf0ffffff,
+ .data = &variant_stm32_sdmmc,
+ },
+ {
+ .id = 0x00253180,
+ .mask = 0xf0ffffff,
+ .data = &variant_stm32_sdmmcv2,
+ },
+ {
+ .id = 0x20253180,
+ .mask = 0xf0ffffff,
+ .data = &variant_stm32_sdmmcv2,
+ },
+ {
+ .id = 0x00353180,
+ .mask = 0xf0ffffff,
+ .data = &variant_stm32_sdmmcv3,
+ },
/* Qualcomm variants */
{
.id = 0x00051180,
@@ -1965,7 +2672,8 @@ MODULE_DEVICE_TABLE(amba, mmci_ids);
static struct amba_driver mmci_driver = {
.drv = {
.name = DRIVER_NAME,
- .pm = &mmci_dev_pm_ops,
+ .pm = pm_ptr(&mmci_dev_pm_ops),
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = mmci_probe,
.remove = mmci_remove,