From 37c8ceb6d92c955f5dd8223c3f6c90b277322210 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Mon, 30 Oct 2023 08:22:26 +0200 Subject: mmc: core: Remove packed command leftovers Packed commands support was removed long time ago, but some bits got left behind. Remove them. Signed-off-by: Avri Altman Link: https://lore.kernel.org/r/20231030062226.1895692-1-avri.altman@wdc.com Signed-off-by: Ulf Hansson --- drivers/mmc/core/mmc.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 705942edacc6..7b996db570c9 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -613,11 +613,6 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) } else { card->ext_csd.data_tag_unit_size = 0; } - - card->ext_csd.max_packed_writes = - ext_csd[EXT_CSD_MAX_PACKED_WRITES]; - card->ext_csd.max_packed_reads = - ext_csd[EXT_CSD_MAX_PACKED_READS]; } else { card->ext_csd.data_sector_size = 512; } -- cgit From 1bcfbfd7c9aa716f61a01682345a1b329f6a6e66 Mon Sep 17 00:00:00 2001 From: Christophe Kerello Date: Wed, 8 Nov 2023 15:16:37 +0100 Subject: mmc: mmci: stm32: add SDIO in-band interrupt mode Add the support of SDIO in-band interrupt mode for STM32 and Ux500 variants. It allows the SD I/O card to interrupt the host on SDMMC_D1 data line. It is not enabled by default on Ux500 variant as this is unstable and Ux500 users should use out-of-band IRQs. Signed-off-by: Christophe Kerello Signed-off-by: Yann Gautier Reviewed-by: Linus Walleij Link: https://lore.kernel.org/r/20231108141637.119497-1-yann.gautier@foss.st.com Signed-off-by: Ulf Hansson --- drivers/mmc/host/mmci.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++-- drivers/mmc/host/mmci.h | 2 ++ 2 files changed, 69 insertions(+), 2 deletions(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index e967cca7a16f..b790c3c3c8f9 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -273,6 +273,7 @@ static struct variant_data variant_stm32_sdmmc = { .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .stm32_idmabsize_mask = GENMASK(12, 5), .stm32_idmabsize_align = BIT(5), + .supports_sdio_irq = true, .busy_timeout = true, .busy_detect = true, .busy_detect_flag = MCI_STM32_BUSYD0, @@ -300,6 +301,7 @@ static struct variant_data variant_stm32_sdmmcv2 = { .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .stm32_idmabsize_mask = GENMASK(16, 5), .stm32_idmabsize_align = BIT(5), + .supports_sdio_irq = true, .dma_lli = true, .busy_timeout = true, .busy_detect = true, @@ -328,6 +330,7 @@ static struct variant_data variant_stm32_sdmmcv3 = { .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .stm32_idmabsize_mask = GENMASK(16, 6), .stm32_idmabsize_align = BIT(6), + .supports_sdio_irq = true, .dma_lli = true, .busy_timeout = true, .busy_detect = true, @@ -421,8 +424,9 @@ void mmci_write_pwrreg(struct mmci_host *host, u32 pwr) */ static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl) { - /* Keep busy mode in DPSM if enabled */ - datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag; + /* Keep busy mode in DPSM and SDIO mask if enabled */ + datactrl |= host->datactrl_reg & (host->variant->busy_dpsm_flag | + host->variant->datactrl_mask_sdio); if (host->datactrl_reg != datactrl) { host->datactrl_reg = datactrl; @@ -1762,6 +1766,25 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) return IRQ_HANDLED; } +static void mmci_write_sdio_irq_bit(struct mmci_host *host, int enable) +{ + void __iomem *base = 
host->base; + u32 mask = readl_relaxed(base + MMCIMASK0); + + if (enable) + writel_relaxed(mask | MCI_ST_SDIOITMASK, base + MMCIMASK0); + else + writel_relaxed(mask & ~MCI_ST_SDIOITMASK, base + MMCIMASK0); +} + +static void mmci_signal_sdio_irq(struct mmci_host *host, u32 status) +{ + if (status & MCI_ST_SDIOIT) { + mmci_write_sdio_irq_bit(host, 0); + sdio_signal_irq(host->mmc); + } +} + /* * Handle completion of command and data transfers. */ @@ -1806,6 +1829,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) mmci_data_irq(host, host->data, status); } + if (host->variant->supports_sdio_irq) + mmci_signal_sdio_irq(host, status); + /* * Busy detection has been handled by mmci_cmd_irq() above. * Clear the status bit to prevent polling in IRQ context. @@ -2042,6 +2068,35 @@ static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) return ret; } +static void mmci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct mmci_host *host = mmc_priv(mmc); + unsigned long flags; + + if (enable) + /* Keep the SDIO mode bit if SDIO irqs are enabled */ + pm_runtime_get_sync(mmc_dev(mmc)); + + spin_lock_irqsave(&host->lock, flags); + mmci_write_sdio_irq_bit(host, enable); + spin_unlock_irqrestore(&host->lock, flags); + + if (!enable) { + pm_runtime_mark_last_busy(mmc_dev(mmc)); + pm_runtime_put_autosuspend(mmc_dev(mmc)); + } +} + +static void mmci_ack_sdio_irq(struct mmc_host *mmc) +{ + struct mmci_host *host = mmc_priv(mmc); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + mmci_write_sdio_irq_bit(host, 1); + spin_unlock_irqrestore(&host->lock, flags); +} + static struct mmc_host_ops mmci_ops = { .request = mmci_request, .pre_req = mmci_pre_request, @@ -2317,6 +2372,16 @@ static int mmci_probe(struct amba_device *dev, mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; } + if (variant->supports_sdio_irq && host->mmc->caps & MMC_CAP_SDIO_IRQ) { + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; + + mmci_ops.enable_sdio_irq = mmci_enable_sdio_irq; + mmci_ops.ack_sdio_irq = mmci_ack_sdio_irq; + + mmci_write_datactrlreg(host, + host->variant->datactrl_mask_sdio); + } + /* Variants with mandatory busy timeout in HW needs R1B responses. */ if (variant->busy_timeout) mmc->caps |= MMC_CAP_NEED_RSP_BUSY; diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 34d9897c289b..a5eb4ced4d5d 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -331,6 +331,7 @@ enum mmci_busy_state { * register. * @opendrain: bitmask identifying the OPENDRAIN bit inside MMCIPOWER register * @dma_lli: true if variant has dma link list feature. + * @supports_sdio_irq: allow SD I/O card to interrupt the host * @stm32_idmabsize_mask: stm32 sdmmc idma buffer size. * @dma_flow_controller: use peripheral as flow controller for DMA. */ @@ -377,6 +378,7 @@ struct variant_data { u32 start_err; u32 opendrain; u8 dma_lli:1; + bool supports_sdio_irq; u32 stm32_idmabsize_mask; u32 stm32_idmabsize_align; bool dma_flow_controller; -- cgit From 9cc811a342be78270d20d1fff3832bc0b23ef364 Mon Sep 17 00:00:00 2001 From: Drew Fustini Date: Thu, 9 Nov 2023 21:41:12 -0800 Subject: mmc: sdhci: add __sdhci_execute_tuning() to header Expose __sdhci_execute_tuning() so that it can be called from the mmc host controller drivers. In the sdhci-of-dwcmshc driver, sdhci_dwcmshc_th1520_ops sets platform_execute_tuning to th1520_execute_tuning(). That function has to manipulate phy registers before tuning can be performed. 
To avoid copying the code verbatim from __sdhci_execute_tuning() into th1520_execute_tuning(), make it possible for __sdhci_execute_tuning() to be called from sdhci-of-dwcmshc. Acked-by: Adrian Hunter Signed-off-by: Drew Fustini Link: https://lore.kernel.org/r/20231109-th1520-mmc-v5-2-018bd039cf17@baylibre.com Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci.c | 3 ++- drivers/mmc/host/sdhci.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index ff41aa56564e..c79f73459915 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2841,7 +2841,7 @@ void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) } EXPORT_SYMBOL_GPL(sdhci_send_tuning); -static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) +int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) { int i; @@ -2879,6 +2879,7 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) sdhci_reset_tuning(host); return -EAGAIN; } +EXPORT_SYMBOL_GPL(__sdhci_execute_tuning); int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) { diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index f219bdea8f28..a20864fc0641 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -793,6 +793,7 @@ void sdhci_set_bus_width(struct sdhci_host *host, int width); void sdhci_reset(struct sdhci_host *host, u8 mask); void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); +int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode); void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios); -- cgit From 43658a542ebf13f1bb80cfaa8ae58f061d0d71b0 Mon Sep 17 00:00:00 2001 From: Drew Fustini Date: Thu, 9 Nov 2023 21:41:13 -0800 Subject: mmc: sdhci-of-dwcmshc: Add support for T-Head TH1520 Add support for the mmc controller in the T-Head TH1520 with the new compatible "thead,th1520-dwcmshc". Implement custom sdhci_ops for set_uhs_signaling, reset, voltage_switch, and platform_execute_tuning. 
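For reference, the general shape of such a platform tuning hook — vendor-specific register setup followed by a hand-off to the helper exported in the previous patch — is sketched below. This is illustrative only and assumes the declarations from drivers/mmc/host/sdhci.h; it mirrors the sdhci core's own sdhci_start_tuning() / __sdhci_execute_tuning() / sdhci_end_tuning() call sequence, while the comment stands in for the TH1520-specific PHY and AT_CTRL programming that the real th1520_execute_tuning() in the diff below performs.

static int example_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	int err;

	/*
	 * Vendor-specific preparation would go here: e.g. programming PHY
	 * delay lines or the controller's auto-tuning control register
	 * before the standard tuning loop runs.
	 */

	sdhci_start_tuning(host);
	err = __sdhci_execute_tuning(host, opcode);	/* core tuning loop */
	sdhci_end_tuning(host);

	if (err)
		dev_err(mmc_dev(host->mmc), "tuning failed: %d\n", err);

	return err;
}
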
Acked-by: Adrian Hunter Signed-off-by: Drew Fustini Link: https://lore.kernel.org/r/20231109-th1520-mmc-v5-3-018bd039cf17@baylibre.com Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-of-dwcmshc.c | 349 ++++++++++++++++++++++++++++++++++++ 1 file changed, 349 insertions(+) (limited to 'drivers/mmc') diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c index 3a3bae6948a8..0eb72544c09e 100644 --- a/drivers/mmc/host/sdhci-of-dwcmshc.c +++ b/drivers/mmc/host/sdhci-of-dwcmshc.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -35,6 +36,21 @@ #define DWCMSHC_CARD_IS_EMMC BIT(0) #define DWCMSHC_ENHANCED_STROBE BIT(8) #define DWCMSHC_EMMC_ATCTRL 0x40 +/* Tuning and auto-tuning fields in AT_CTRL_R control register */ +#define AT_CTRL_AT_EN BIT(0) /* autotuning is enabled */ +#define AT_CTRL_CI_SEL BIT(1) /* interval to drive center phase select */ +#define AT_CTRL_SWIN_TH_EN BIT(2) /* sampling window threshold enable */ +#define AT_CTRL_RPT_TUNE_ERR BIT(3) /* enable reporting framing errors */ +#define AT_CTRL_SW_TUNE_EN BIT(4) /* enable software managed tuning */ +#define AT_CTRL_WIN_EDGE_SEL_MASK GENMASK(11, 8) /* bits [11:8] */ +#define AT_CTRL_WIN_EDGE_SEL 0xf /* sampling window edge select */ +#define AT_CTRL_TUNE_CLK_STOP_EN BIT(16) /* clocks stopped during phase code change */ +#define AT_CTRL_PRE_CHANGE_DLY_MASK GENMASK(18, 17) /* bits [18:17] */ +#define AT_CTRL_PRE_CHANGE_DLY 0x1 /* 2-cycle latency */ +#define AT_CTRL_POST_CHANGE_DLY_MASK GENMASK(20, 19) /* bits [20:19] */ +#define AT_CTRL_POST_CHANGE_DLY 0x3 /* 4-cycle latency */ +#define AT_CTRL_SWIN_TH_VAL_MASK GENMASK(31, 24) /* bits [31:24] */ +#define AT_CTRL_SWIN_TH_VAL 0x9 /* sampling window threshold */ /* Rockchip specific Registers */ #define DWCMSHC_EMMC_DLL_CTRL 0x800 @@ -72,6 +88,82 @@ (((x) & DWCMSHC_EMMC_DLL_TIMEOUT) == 0)) #define RK35xx_MAX_CLKS 3 +/* PHY register area pointer */ +#define DWC_MSHC_PTR_PHY_R 0x300 + +/* PHY general configuration */ +#define PHY_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x00) +#define PHY_CNFG_RSTN_DEASSERT 0x1 /* Deassert PHY reset */ +#define PHY_CNFG_PAD_SP_MASK GENMASK(19, 16) /* bits [19:16] */ +#define PHY_CNFG_PAD_SP 0x0c /* PMOS TX drive strength */ +#define PHY_CNFG_PAD_SN_MASK GENMASK(23, 20) /* bits [23:20] */ +#define PHY_CNFG_PAD_SN 0x0c /* NMOS TX drive strength */ + +/* PHY command/response pad settings */ +#define PHY_CMDPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x04) + +/* PHY data pad settings */ +#define PHY_DATAPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x06) + +/* PHY clock pad settings */ +#define PHY_CLKPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x08) + +/* PHY strobe pad settings */ +#define PHY_STBPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0a) + +/* PHY reset pad settings */ +#define PHY_RSTNPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0c) + +/* Bitfields are common for all pad settings */ +#define PHY_PAD_RXSEL_1V8 0x1 /* Receiver type select for 1.8V */ +#define PHY_PAD_RXSEL_3V3 0x2 /* Receiver type select for 3.3V */ + +#define PHY_PAD_WEAKPULL_MASK GENMASK(4, 3) /* bits [4:3] */ +#define PHY_PAD_WEAKPULL_PULLUP 0x1 /* Weak pull up enabled */ +#define PHY_PAD_WEAKPULL_PULLDOWN 0x2 /* Weak pull down enabled */ + +#define PHY_PAD_TXSLEW_CTRL_P_MASK GENMASK(8, 5) /* bits [8:5] */ +#define PHY_PAD_TXSLEW_CTRL_P 0x3 /* Slew control for P-Type pad TX */ +#define PHY_PAD_TXSLEW_CTRL_N_MASK GENMASK(12, 9) /* bits [12:9] */ +#define PHY_PAD_TXSLEW_CTRL_N 0x3 /* Slew control for N-Type pad TX */ + +/* PHY CLK delay line settings */ +#define PHY_SDCLKDL_CNFG_R 
(DWC_MSHC_PTR_PHY_R + 0x1d) +#define PHY_SDCLKDL_CNFG_UPDATE BIT(4) /* set before writing to SDCLKDL_DC */ + +/* PHY CLK delay line delay code */ +#define PHY_SDCLKDL_DC_R (DWC_MSHC_PTR_PHY_R + 0x1e) +#define PHY_SDCLKDL_DC_INITIAL 0x40 /* initial delay code */ +#define PHY_SDCLKDL_DC_DEFAULT 0x32 /* default delay code */ +#define PHY_SDCLKDL_DC_HS400 0x18 /* delay code for HS400 mode */ + +/* PHY drift_cclk_rx delay line configuration setting */ +#define PHY_ATDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x21) +#define PHY_ATDL_CNFG_INPSEL_MASK GENMASK(3, 2) /* bits [3:2] */ +#define PHY_ATDL_CNFG_INPSEL 0x3 /* delay line input source */ + +/* PHY DLL control settings */ +#define PHY_DLL_CTRL_R (DWC_MSHC_PTR_PHY_R + 0x24) +#define PHY_DLL_CTRL_DISABLE 0x0 /* PHY DLL is enabled */ +#define PHY_DLL_CTRL_ENABLE 0x1 /* PHY DLL is disabled */ + +/* PHY DLL configuration register 1 */ +#define PHY_DLL_CNFG1_R (DWC_MSHC_PTR_PHY_R + 0x25) +#define PHY_DLL_CNFG1_SLVDLY_MASK GENMASK(5, 4) /* bits [5:4] */ +#define PHY_DLL_CNFG1_SLVDLY 0x2 /* DLL slave update delay input */ +#define PHY_DLL_CNFG1_WAITCYCLE 0x5 /* DLL wait cycle input */ + +/* PHY DLL configuration register 2 */ +#define PHY_DLL_CNFG2_R (DWC_MSHC_PTR_PHY_R + 0x26) +#define PHY_DLL_CNFG2_JUMPSTEP 0xa /* DLL jump step input */ + +/* PHY DLL master and slave delay line configuration settings */ +#define PHY_DLLDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x28) +#define PHY_DLLDL_CNFG_SLV_INPSEL_MASK GENMASK(6, 5) /* bits [6:5] */ +#define PHY_DLLDL_CNFG_SLV_INPSEL 0x3 /* clock source select for slave DL */ + +#define FLAG_IO_FIXED_1V8 BIT(0) + #define BOUNDARY_OK(addr, len) \ ((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1))) @@ -92,6 +184,8 @@ struct dwcmshc_priv { struct clk *bus_clk; int vendor_specific_area1; /* P_VENDOR_SPECIFIC_AREA reg */ void *priv; /* pointer to SoC private stuff */ + u16 delay_line; + u16 flags; }; /* @@ -157,6 +251,127 @@ static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq) sdhci_request(mmc, mrq); } +static void dwcmshc_phy_1_8v_init(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); + u32 val; + + /* deassert phy reset & set tx drive strength */ + val = PHY_CNFG_RSTN_DEASSERT; + val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP); + val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN); + sdhci_writel(host, val, PHY_CNFG_R); + + /* disable delay line */ + sdhci_writeb(host, PHY_SDCLKDL_CNFG_UPDATE, PHY_SDCLKDL_CNFG_R); + + /* set delay line */ + sdhci_writeb(host, priv->delay_line, PHY_SDCLKDL_DC_R); + sdhci_writeb(host, PHY_DLL_CNFG2_JUMPSTEP, PHY_DLL_CNFG2_R); + + /* enable delay lane */ + val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); + val &= ~(PHY_SDCLKDL_CNFG_UPDATE); + sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R); + + /* configure phy pads */ + val = PHY_PAD_RXSEL_1V8; + val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); + sdhci_writew(host, val, PHY_CMDPAD_CNFG_R); + sdhci_writew(host, val, PHY_DATAPAD_CNFG_R); + sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R); + + val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); + sdhci_writew(host, val, PHY_CLKPAD_CNFG_R); + + val = PHY_PAD_RXSEL_1V8; + val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, 
PHY_PAD_WEAKPULL_PULLDOWN); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); + sdhci_writew(host, val, PHY_STBPAD_CNFG_R); + + /* enable data strobe mode */ + sdhci_writeb(host, FIELD_PREP(PHY_DLLDL_CNFG_SLV_INPSEL_MASK, PHY_DLLDL_CNFG_SLV_INPSEL), + PHY_DLLDL_CNFG_R); + + /* enable phy dll */ + sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R); +} + +static void dwcmshc_phy_3_3v_init(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); + u32 val; + + /* deassert phy reset & set tx drive strength */ + val = PHY_CNFG_RSTN_DEASSERT; + val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP); + val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN); + sdhci_writel(host, val, PHY_CNFG_R); + + /* disable delay line */ + sdhci_writeb(host, PHY_SDCLKDL_CNFG_UPDATE, PHY_SDCLKDL_CNFG_R); + + /* set delay line */ + sdhci_writeb(host, priv->delay_line, PHY_SDCLKDL_DC_R); + sdhci_writeb(host, PHY_DLL_CNFG2_JUMPSTEP, PHY_DLL_CNFG2_R); + + /* enable delay lane */ + val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); + val &= ~(PHY_SDCLKDL_CNFG_UPDATE); + sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R); + + /* configure phy pads */ + val = PHY_PAD_RXSEL_3V3; + val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); + sdhci_writew(host, val, PHY_CMDPAD_CNFG_R); + sdhci_writew(host, val, PHY_DATAPAD_CNFG_R); + sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R); + + val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); + sdhci_writew(host, val, PHY_CLKPAD_CNFG_R); + + val = PHY_PAD_RXSEL_3V3; + val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); + sdhci_writew(host, val, PHY_STBPAD_CNFG_R); + + /* enable phy dll */ + sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R); +} + +static void th1520_sdhci_set_phy(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); + u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO; + u16 emmc_ctrl; + + /* Before power on, set PHY configs */ + if (priv->flags & FLAG_IO_FIXED_1V8) + dwcmshc_phy_1_8v_init(host); + else + dwcmshc_phy_3_3v_init(host); + + if ((host->mmc->caps2 & emmc_caps) == emmc_caps) { + emmc_ctrl = sdhci_readw(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL); + emmc_ctrl |= DWCMSHC_CARD_IS_EMMC; + sdhci_writew(host, emmc_ctrl, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL); + } + + sdhci_writeb(host, FIELD_PREP(PHY_DLL_CNFG1_SLVDLY_MASK, PHY_DLL_CNFG1_SLVDLY) | + PHY_DLL_CNFG1_WAITCYCLE, PHY_DLL_CNFG1_R); +} + static void dwcmshc_set_uhs_signaling(struct sdhci_host *host, unsigned int timing) { @@ -189,9 +404,25 @@ static void dwcmshc_set_uhs_signaling(struct sdhci_host *host, ctrl_2 |= DWCMSHC_CTRL_HS400; } + if (priv->flags & FLAG_IO_FIXED_1V8) + ctrl_2 |= SDHCI_CTRL_VDD_180; sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); } +static void th1520_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct 
dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); + + dwcmshc_set_uhs_signaling(host, timing); + if (timing == MMC_TIMING_MMC_HS400) + priv->delay_line = PHY_SDCLKDL_DC_HS400; + else + sdhci_writeb(host, 0, PHY_DLLDL_CNFG_R); + th1520_sdhci_set_phy(host); +} + static void dwcmshc_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios) { @@ -338,6 +569,79 @@ static void rk35xx_sdhci_reset(struct sdhci_host *host, u8 mask) sdhci_reset(host, mask); } +static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); + u32 val = 0; + + if (host->flags & SDHCI_HS400_TUNING) + return 0; + + sdhci_writeb(host, FIELD_PREP(PHY_ATDL_CNFG_INPSEL_MASK, PHY_ATDL_CNFG_INPSEL), + PHY_ATDL_CNFG_R); + val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL); + + /* + * configure tuning settings: + * - center phase select code driven in block gap interval + * - disable reporting of framing errors + * - disable software managed tuning + * - disable user selection of sampling window edges, + * instead tuning calculated edges are used + */ + val &= ~(AT_CTRL_CI_SEL | AT_CTRL_RPT_TUNE_ERR | AT_CTRL_SW_TUNE_EN | + FIELD_PREP(AT_CTRL_WIN_EDGE_SEL_MASK, AT_CTRL_WIN_EDGE_SEL)); + + /* + * configure tuning settings: + * - enable auto-tuning + * - enable sampling window threshold + * - stop clocks during phase code change + * - set max latency in cycles between tx and rx clocks + * - set max latency in cycles to switch output phase + * - set max sampling window threshold value + */ + val |= AT_CTRL_AT_EN | AT_CTRL_SWIN_TH_EN | AT_CTRL_TUNE_CLK_STOP_EN; + val |= FIELD_PREP(AT_CTRL_PRE_CHANGE_DLY_MASK, AT_CTRL_PRE_CHANGE_DLY); + val |= FIELD_PREP(AT_CTRL_POST_CHANGE_DLY_MASK, AT_CTRL_POST_CHANGE_DLY); + val |= FIELD_PREP(AT_CTRL_SWIN_TH_VAL_MASK, AT_CTRL_SWIN_TH_VAL); + + sdhci_writel(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL); + val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL); + + /* perform tuning */ + sdhci_start_tuning(host); + host->tuning_err = __sdhci_execute_tuning(host, opcode); + if (host->tuning_err) { + /* disable auto-tuning upon tuning error */ + val &= ~AT_CTRL_AT_EN; + sdhci_writel(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL); + dev_err(mmc_dev(host->mmc), "tuning failed: %d\n", host->tuning_err); + return -EIO; + } + sdhci_end_tuning(host); + + return 0; +} + +static void th1520_sdhci_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); + u16 ctrl_2; + + sdhci_reset(host, mask); + + if (priv->flags & FLAG_IO_FIXED_1V8) { + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (!(ctrl_2 & SDHCI_CTRL_VDD_180)) { + ctrl_2 |= SDHCI_CTRL_VDD_180; + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); + } + } +} + static const struct sdhci_ops sdhci_dwcmshc_ops = { .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, @@ -356,6 +660,17 @@ static const struct sdhci_ops sdhci_dwcmshc_rk35xx_ops = { .adma_write_desc = dwcmshc_adma_write_desc, }; +static const struct sdhci_ops sdhci_dwcmshc_th1520_ops = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .set_uhs_signaling = th1520_set_uhs_signaling, + .get_max_clock = dwcmshc_get_max_clock, + .reset = th1520_sdhci_reset, + .adma_write_desc = dwcmshc_adma_write_desc, + .voltage_switch = dwcmshc_phy_1_8v_init, + 
.platform_execute_tuning = &th1520_execute_tuning, +}; + static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = { .ops = &sdhci_dwcmshc_ops, .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, @@ -379,6 +694,12 @@ static const struct sdhci_pltfm_data sdhci_dwcmshc_rk35xx_pdata = { SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN, }; +static const struct sdhci_pltfm_data sdhci_dwcmshc_th1520_pdata = { + .ops = &sdhci_dwcmshc_th1520_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, +}; + static int dwcmshc_rk35xx_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv) { int err; @@ -447,6 +768,10 @@ static const struct of_device_id sdhci_dwcmshc_dt_ids[] = { .compatible = "snps,dwcmshc-sdhci", .data = &sdhci_dwcmshc_pdata, }, + { + .compatible = "thead,th1520-dwcmshc", + .data = &sdhci_dwcmshc_th1520_pdata, + }, {}, }; MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids); @@ -542,6 +867,30 @@ static int dwcmshc_probe(struct platform_device *pdev) goto err_clk; } + if (pltfm_data == &sdhci_dwcmshc_th1520_pdata) { + priv->delay_line = PHY_SDCLKDL_DC_DEFAULT; + + if ((device_property_read_bool(dev, "mmc-ddr-1_8v")) | + (device_property_read_bool(dev, "mmc-hs200-1_8v")) | + (device_property_read_bool(dev, "mmc-hs400-1_8v"))) + priv->flags |= FLAG_IO_FIXED_1V8; + else + priv->flags &= ~FLAG_IO_FIXED_1V8; + + /* + * start_signal_voltage_switch() will try 3.3V first + * then 1.8V. Use SDHCI_SIGNALING_180 rather than + * SDHCI_SIGNALING_330 to avoid setting voltage to 3.3V + * in sdhci_start_signal_voltage_switch(). + */ + if (priv->flags & FLAG_IO_FIXED_1V8) { + host->flags &= ~SDHCI_SIGNALING_330; + host->flags |= SDHCI_SIGNALING_180; + } + + sdhci_enable_v4_mode(host); + } + #ifdef CONFIG_ACPI if (pltfm_data == &sdhci_dwcmshc_bf3_pdata) sdhci_enable_v4_mode(host); -- cgit From e18a38660786dbe8536ecf74563df25936a3532a Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Thu, 16 Nov 2023 18:46:00 -0700 Subject: mmc: sdhci-of-dwcmshc: Use logical OR instead of bitwise OR in dwcmshc_probe() Clang warns (or errors with CONFIG_WERROR=y): drivers/mmc/host/sdhci-of-dwcmshc.c:873:7: error: use of bitwise '|' with boolean operands [-Werror,-Wbitwise-instead-of-logical] 873 | if ((device_property_read_bool(dev, "mmc-ddr-1_8v")) | | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 874 | (device_property_read_bool(dev, "mmc-hs200-1_8v")) | | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | || 875 | (device_property_read_bool(dev, "mmc-hs400-1_8v"))) | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/mmc/host/sdhci-of-dwcmshc.c:873:7: note: cast one or both operands to int to silence this warning drivers/mmc/host/sdhci-of-dwcmshc.c:873:7: error: use of bitwise '|' with boolean operands [-Werror,-Wbitwise-instead-of-logical] 873 | if ((device_property_read_bool(dev, "mmc-ddr-1_8v")) | | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | || 874 | (device_property_read_bool(dev, "mmc-hs200-1_8v")) | | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/mmc/host/sdhci-of-dwcmshc.c:873:7: note: cast one or both operands to int to silence this warning 2 errors generated. There is little reason for this if statement to use bitwise ORs, as the short circuiting of logical OR does not need to be avoided in this context; it would be wasteful to call device_property_read_bool() three times if the first two calls returned true. Switch to logical OR to fix the warning. 
While in the area, the parentheses around the calls to device_property_read_bool() are not necessary and make the if statement harder to read, so remove them. Closes: https://github.com/ClangBuiltLinux/linux/issues/1960 Signed-off-by: Nathan Chancellor Acked-by: Adrian Hunter Tested-by: Drew Fustini Link: https://lore.kernel.org/r/20231116-sdhci-of-dwcmshc-fix-wbitwise-instead-of-logical-v1-1-7e1a7f4ccaab@kernel.org Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-of-dwcmshc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c index 0eb72544c09e..a1f57af6acfb 100644 --- a/drivers/mmc/host/sdhci-of-dwcmshc.c +++ b/drivers/mmc/host/sdhci-of-dwcmshc.c @@ -870,9 +870,9 @@ static int dwcmshc_probe(struct platform_device *pdev) if (pltfm_data == &sdhci_dwcmshc_th1520_pdata) { priv->delay_line = PHY_SDCLKDL_DC_DEFAULT; - if ((device_property_read_bool(dev, "mmc-ddr-1_8v")) | - (device_property_read_bool(dev, "mmc-hs200-1_8v")) | - (device_property_read_bool(dev, "mmc-hs400-1_8v"))) + if (device_property_read_bool(dev, "mmc-ddr-1_8v") || + device_property_read_bool(dev, "mmc-hs200-1_8v") || + device_property_read_bool(dev, "mmc-hs400-1_8v")) priv->flags |= FLAG_IO_FIXED_1V8; else priv->flags &= ~FLAG_IO_FIXED_1V8; -- cgit From 5cb2f9286a31f33dc732c57540838ad9339393ab Mon Sep 17 00:00:00 2001 From: Vignesh Raghavendra Date: Wed, 22 Nov 2023 11:32:14 +0530 Subject: mmc: sdhci_am654: Drop lookup for deprecated ti,otap-del-sel ti,otap-del-sel has been deprecated since v5.7 and there are no users of this property and no documentation in the DT bindings either. Drop the fallback code looking for this property, this makes sdhci_am654_get_otap_delay() much easier to read as all the TAP values can be handled via a single iterator loop. Signed-off-by: Vignesh Raghavendra Acked-by: Adrian Hunter Link: https://lore.kernel.org/r/20231122060215.2074799-1-vigneshr@ti.com Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci_am654.c | 37 ++++++------------------------------- 1 file changed, 6 insertions(+), 31 deletions(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index 967bd2dfcda1..d659c59422e1 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -141,7 +141,6 @@ static const struct timing_data td[] = { struct sdhci_am654_data { struct regmap *base; - bool legacy_otapdly; int otap_del_sel[ARRAY_SIZE(td)]; int itap_del_sel[ARRAY_SIZE(td)]; int clkbuf_sel; @@ -272,11 +271,7 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock) sdhci_set_clock(host, clock); /* Setup DLL Output TAP delay */ - if (sdhci_am654->legacy_otapdly) - otap_del_sel = sdhci_am654->otap_del_sel[0]; - else - otap_del_sel = sdhci_am654->otap_del_sel[timing]; - + otap_del_sel = sdhci_am654->otap_del_sel[timing]; otap_del_ena = (timing > MMC_TIMING_UHS_SDR25) ? 
1 : 0; mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK; @@ -314,10 +309,7 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host, u32 mask, val; /* Setup DLL Output TAP delay */ - if (sdhci_am654->legacy_otapdly) - otap_del_sel = sdhci_am654->otap_del_sel[0]; - else - otap_del_sel = sdhci_am654->otap_del_sel[timing]; + otap_del_sel = sdhci_am654->otap_del_sel[timing]; mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK; val = (0x1 << OTAPDLYENA_SHIFT) | @@ -577,32 +569,15 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host, int i; int ret; - ret = device_property_read_u32(dev, td[MMC_TIMING_LEGACY].otap_binding, - &sdhci_am654->otap_del_sel[MMC_TIMING_LEGACY]); - if (ret) { - /* - * ti,otap-del-sel-legacy is mandatory, look for old binding - * if not found. - */ - ret = device_property_read_u32(dev, "ti,otap-del-sel", - &sdhci_am654->otap_del_sel[0]); - if (ret) { - dev_err(dev, "Couldn't find otap-del-sel\n"); - - return ret; - } - - dev_info(dev, "Using legacy binding ti,otap-del-sel\n"); - sdhci_am654->legacy_otapdly = true; - - return 0; - } - for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) { ret = device_property_read_u32(dev, td[i].otap_binding, &sdhci_am654->otap_del_sel[i]); if (ret) { + if (i == MMC_TIMING_LEGACY) { + dev_err(dev, "Couldn't find mandatory ti,otap-del-sel-legacy\n"); + return ret; + } dev_dbg(dev, "Couldn't find %s\n", td[i].otap_binding); /* -- cgit From 4d0c8d0aef6355660b6775d57ccd5d4ea2e15802 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Wed, 29 Nov 2023 11:25:35 +0200 Subject: mmc: core: Use mrq.sbc in close-ended ffu Field Firmware Update (ffu) may use close-ended or open ended sequence. Each such sequence is comprised of a write commands enclosed between 2 switch commands - to and from ffu mode. So for the close-ended case, it will be: cmd6->cmd23-cmd25-cmd6. Some host controllers however, get confused when multi-block rw is sent without sbc, and may generate auto-cmd12 which breaks the ffu sequence. I encountered this issue while testing fwupd (github.com/fwupd/fwupd) on HP Chromebook x2, a qualcomm based QC-7c, code name - strongbad. Instead of a quirk, or hooking the request function of the msm ops, it would be better to fix the ioctl handling and make it use mrq.sbc instead of issuing SET_BLOCK_COUNT separately. 
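To illustrate the ioctl path this touches, below is a hedged userspace sketch (not fwupd or mmc-utils code) that issues one close-ended FFU write as a single MMC_IOC_MULTI_CMD sequence: CMD6 (enter FFU) -> CMD23 -> CMD25 (firmware data) -> CMD6 (back to normal). The response-flag values are mirrored from the kernel's mmc core, and the EXT_CSD MODE_CONFIG index, the mode values and the FFU program address are placeholder assumptions to be checked against the JEDEC spec and the device's EXT_CSD.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/mmc/ioctl.h>

/* response/command flags mirrored from the kernel's mmc core */
#define MMC_RSP_PRESENT	(1 << 0)
#define MMC_RSP_CRC	(1 << 2)
#define MMC_RSP_BUSY	(1 << 3)
#define MMC_RSP_OPCODE	(1 << 4)
#define MMC_CMD_AC	(0 << 5)
#define MMC_CMD_ADTC	(1 << 5)
#define MMC_RSP_R1	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_RSP_R1B	(MMC_RSP_R1 | MMC_RSP_BUSY)

#define MMC_SWITCH			6
#define MMC_SET_BLOCK_COUNT		23
#define MMC_WRITE_MULTIPLE_BLOCK	25

/* placeholders: MODE_CONFIG index and mode values, verify against JEDEC */
#define EXT_CSD_MODE_CONFIG	30
#define EXT_CSD_FFU_MODE	1
#define EXT_CSD_NORMAL_MODE	0

static void set_switch(struct mmc_ioc_cmd *ic, uint8_t index, uint8_t value)
{
	memset(ic, 0, sizeof(*ic));
	ic->opcode = MMC_SWITCH;
	/* access mode 0x03 = write byte: (access<<24)|(index<<16)|(value<<8) */
	ic->arg = (0x03 << 24) | (index << 16) | (value << 8);
	ic->flags = MMC_RSP_R1B | MMC_CMD_AC;
	ic->write_flag = 1;
}

int main(int argc, char **argv)
{
	unsigned int blocks = 8;		/* one firmware chunk, in 512B blocks */
	uint8_t *fw = calloc(blocks, 512);	/* stand-in for the firmware data */
	uint32_t ffu_arg = 0;			/* placeholder: FFU arg from EXT_CSD[487..490] */
	struct mmc_ioc_multi_cmd *mc;
	struct mmc_ioc_cmd *ic;
	int fd, ret;

	fd = open(argc > 1 ? argv[1] : "/dev/mmcblk0", O_RDWR);
	mc = calloc(1, sizeof(*mc) + 4 * sizeof(struct mmc_ioc_cmd));
	if (fd < 0 || !fw || !mc)
		return 1;

	mc->num_of_cmds = 4;

	/* CMD6: switch to FFU mode */
	set_switch(&mc->cmds[0], EXT_CSD_MODE_CONFIG, EXT_CSD_FFU_MODE);

	/* CMD23: block count for the following write */
	ic = &mc->cmds[1];
	ic->opcode = MMC_SET_BLOCK_COUNT;
	ic->arg = blocks;
	ic->flags = MMC_RSP_R1 | MMC_CMD_AC;

	/* CMD25: firmware data written to the FFU program address */
	ic = &mc->cmds[2];
	ic->opcode = MMC_WRITE_MULTIPLE_BLOCK;
	ic->arg = ffu_arg;
	ic->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	ic->write_flag = 1;
	ic->blksz = 512;
	ic->blocks = blocks;
	mmc_ioc_cmd_set_data((*ic), fw);

	/* CMD6: switch back to normal mode */
	set_switch(&mc->cmds[3], EXT_CSD_MODE_CONFIG, EXT_CSD_NORMAL_MODE);

	ret = ioctl(fd, MMC_IOC_MULTI_CMD, mc);
	if (ret)
		perror("MMC_IOC_MULTI_CMD");

	close(fd);
	free(mc);
	free(fw);
	return ret ? 1 : 0;
}

With this patch applied, the kernel spots that cmds[1] is a SET_BLOCK_COUNT immediately preceding a multi-block write, drops it as a standalone command and carries its argument in mrq.sbc of the CMD25 request, so hosts that auto-generate CMD12 for un-sbc'd multi-block transfers no longer break the FFU sequence.
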
Signed-off-by: Avri Altman Acked-by: Adrian Hunter Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20231129092535.3278-1-avri.altman@wdc.com Signed-off-by: Ulf Hansson --- drivers/mmc/core/block.c | 46 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 43 insertions(+), 3 deletions(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index f9a5cffa64b1..892e74e611a0 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -400,6 +400,10 @@ struct mmc_blk_ioc_data { struct mmc_ioc_cmd ic; unsigned char *buf; u64 buf_bytes; + unsigned int flags; +#define MMC_BLK_IOC_DROP BIT(0) /* drop this mrq */ +#define MMC_BLK_IOC_SBC BIT(1) /* use mrq.sbc */ + struct mmc_rpmb_data *rpmb; }; @@ -465,7 +469,7 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr, } static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, - struct mmc_blk_ioc_data *idata) + struct mmc_blk_ioc_data **idatas, int i) { struct mmc_command cmd = {}, sbc = {}; struct mmc_data data = {}; @@ -475,10 +479,18 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, unsigned int busy_timeout_ms; int err; unsigned int target_part; + struct mmc_blk_ioc_data *idata = idatas[i]; + struct mmc_blk_ioc_data *prev_idata = NULL; if (!card || !md || !idata) return -EINVAL; + if (idata->flags & MMC_BLK_IOC_DROP) + return 0; + + if (idata->flags & MMC_BLK_IOC_SBC) + prev_idata = idatas[i - 1]; + /* * The RPMB accesses comes in from the character device, so we * need to target these explicitly. Else we just target the @@ -532,7 +544,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, return err; } - if (idata->rpmb) { + if (idata->rpmb || prev_idata) { sbc.opcode = MMC_SET_BLOCK_COUNT; /* * We don't do any blockcount validation because the max size @@ -540,6 +552,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, * 'Reliable Write' bit here. 
*/ sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31)); + if (prev_idata) + sbc.arg = prev_idata->ic.arg; sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; mrq.sbc = &sbc; } @@ -557,6 +571,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, mmc_wait_for_req(card->host, &mrq); memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp)); + if (prev_idata) { + memcpy(&prev_idata->ic.response, sbc.resp, sizeof(sbc.resp)); + if (sbc.error) { + dev_err(mmc_dev(card->host), "%s: sbc error %d\n", + __func__, sbc.error); + return sbc.error; + } + } + if (cmd.error) { dev_err(mmc_dev(card->host), "%s: cmd error %d\n", __func__, cmd.error); @@ -1032,6 +1055,20 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) md->reset_done &= ~type; } +static void mmc_blk_check_sbc(struct mmc_queue_req *mq_rq) +{ + struct mmc_blk_ioc_data **idata = mq_rq->drv_op_data; + int i; + + for (i = 1; i < mq_rq->ioc_count; i++) { + if (idata[i - 1]->ic.opcode == MMC_SET_BLOCK_COUNT && + mmc_op_multi(idata[i]->ic.opcode)) { + idata[i - 1]->flags |= MMC_BLK_IOC_DROP; + idata[i]->flags |= MMC_BLK_IOC_SBC; + } + } +} + /* * The non-block commands come back from the block layer after it queued it and * processed it with all other requests and then they get issued in this @@ -1059,11 +1096,14 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) if (ret) break; } + + mmc_blk_check_sbc(mq_rq); + fallthrough; case MMC_DRV_OP_IOCTL_RPMB: idata = mq_rq->drv_op_data; for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) { - ret = __mmc_blk_ioctl_cmd(card, md, idata[i]); + ret = __mmc_blk_ioctl_cmd(card, md, idata, i); if (ret) break; } -- cgit From 9463571b29bf03a0dc1114f0cb17042b179066f9 Mon Sep 17 00:00:00 2001 From: Pin-yen Lin Date: Fri, 1 Dec 2023 18:26:51 +0800 Subject: mmc: mtk-sd: Increase the verbosity of msdc_track_cmd_data This log message is necessary for debugging, so enable it by default to debug issues that are hard to reproduce locally. Signed-off-by: Pin-yen Lin Reviewed-by: Wenbin Mei Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20231201102747.3854573-1-treapking@chromium.org Signed-off-by: Ulf Hansson --- drivers/mmc/host/mtk-sd.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 97f7c3d4be6e..6ae5e0a9fca9 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -1149,9 +1149,11 @@ static void msdc_recheck_sdio_irq(struct msdc_host *host) static void msdc_track_cmd_data(struct msdc_host *host, struct mmc_command *cmd) { - if (host->error) - dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", - __func__, cmd->opcode, cmd->arg, host->error); + if (host->error && + ((!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning) || + cmd->error == -ETIMEDOUT)) + dev_warn(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", + __func__, cmd->opcode, cmd->arg, host->error); } static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq) -- cgit From 1f30f51053715af81a25d902511079aca3fdb213 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 5 Dec 2023 21:58:55 -0800 Subject: mmc: sdhci-omap: don't misuse kernel-doc marker Use "/*" instead of "/**" for common C comments to prevent warnings from scripts/kernel-doc. sdhci-omap.c:3: warning: This comment starts with '/**', but isn't a kernel-doc comment. 
Refer Documentation/doc-guide/kernel-doc.rst sdhci-omap.c:3: warning: missing initial short description on line: * SDHCI Controller driver for TI's OMAP SoCs Signed-off-by: Randy Dunlap Reported-by: kernel test robot Link: https://lore.kernel.org/oe-kbuild-all/202311201117.lFxgJTK6-lkp@intel.com/ Link: https://lore.kernel.org/r/20231206055855.21092-1-rdunlap@infradead.org Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-omap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 1e0bc7bace1b..e78faef67d7a 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * SDHCI Controller driver for TI's OMAP SoCs * * Copyright (C) 2017 Texas Instruments -- cgit From d3ddafd34bc4e353dc9fc2d193abc4c4463dc588 Mon Sep 17 00:00:00 2001 From: Axe Yang Date: Thu, 7 Dec 2023 14:35:35 +0800 Subject: mmc: mtk-sd: Extend number of tuning steps Previously, during the MSDC calibration process, a full clock cycle actually not be covered, which in some cases didn't yield the best results and could cause CRC errors. This problem is particularly evident when MSDC is used as an SDIO host. In fact, MSDC support tuning up to a maximum of 64 steps, but by default, the step number is 32. By increase the tuning step, we are more likely to cover more parts of a clock cycle, and get better calibration result. To illustrate, when tuning 32 steps, if the obtained window has a hole near the middle, like this: 0xffc07ff (hex), then the selected delay will be the 6 (counting from right to left). (32 <- 1) 1111 1111 1100 0000 0000 0111 11(1)1 1111 However, if we tune 64 steps, the window obtained may look like this: 0xfffffffffffc07ff. The final selected delay will be 44, which is safer as it is further away from the hole: (64 <- 1) 1111 ... (1)111 1111 1111 1111 1111 1100 0000 0000 0111 1111 1111 In this case, delay 6 selected through 32 steps tuning is obviously not optimal, and this delay is closer to the hole, using it would easily cause CRC problems. As per mesaurements taken on mediatek SoC platform, the tuning phase will take: eMMC - 32 steps: ~3ms - 64 steps: ~6ms SDIO - 32 steps: ~4ms - 64 steos: ~7ms Tuning more steps won't prolong boot times by any meaningful amount of time, so for SD/SDIO the default tuning steps will be adjust to 64. But for eMMC, it is still preferred to use 32 steps tuning as otherwise there would be performance lose when accessing the RPMB partition(requiring retuning each time). You can configure property "mediatek,tuning-step" in MSDC dts node to adjust the step number. 
Signed-off-by: Axe Yang Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20231207063535.29546-3-axe.yang@mediatek.com Signed-off-by: Ulf Hansson --- drivers/mmc/host/mtk-sd.c | 158 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 110 insertions(+), 48 deletions(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 6ae5e0a9fca9..1634b1f5d201 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -252,12 +252,16 @@ #define MSDC_PAD_TUNE_DATWRDLY GENMASK(4, 0) /* RW */ #define MSDC_PAD_TUNE_DATRRDLY GENMASK(12, 8) /* RW */ +#define MSDC_PAD_TUNE_DATRRDLY2 GENMASK(12, 8) /* RW */ #define MSDC_PAD_TUNE_CMDRDLY GENMASK(20, 16) /* RW */ +#define MSDC_PAD_TUNE_CMDRDLY2 GENMASK(20, 16) /* RW */ #define MSDC_PAD_TUNE_CMDRRDLY GENMASK(26, 22) /* RW */ #define MSDC_PAD_TUNE_CLKTDLY GENMASK(31, 27) /* RW */ #define MSDC_PAD_TUNE_RXDLYSEL BIT(15) /* RW */ #define MSDC_PAD_TUNE_RD_SEL BIT(13) /* RW */ #define MSDC_PAD_TUNE_CMD_SEL BIT(21) /* RW */ +#define MSDC_PAD_TUNE_RD2_SEL BIT(13) /* RW */ +#define MSDC_PAD_TUNE_CMD2_SEL BIT(21) /* RW */ #define PAD_DS_TUNE_DLY_SEL BIT(0) /* RW */ #define PAD_DS_TUNE_DLY1 GENMASK(6, 2) /* RW */ @@ -325,7 +329,9 @@ #define DEFAULT_DEBOUNCE (8) /* 8 cycles CD debounce */ -#define PAD_DELAY_MAX 32 /* PAD delay cells */ +#define TUNING_REG2_FIXED_OFFEST 4 +#define PAD_DELAY_HALF 32 /* PAD delay cells */ +#define PAD_DELAY_FULL 64 /*--------------------------------------------------------------------------*/ /* Descriptor Structure */ /*--------------------------------------------------------------------------*/ @@ -461,6 +467,7 @@ struct msdc_host { u32 hs400_ds_dly3; u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */ u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */ + u32 tuning_step; bool hs400_cmd_resp_sel_rising; /* cmd response sample selection for HS400 */ bool hs400_mode; /* current eMMC will run at hs400 mode */ @@ -1617,7 +1624,7 @@ static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts) } if (cmd_err || dat_err) { - dev_err(host->dev, "cmd_err = %d, dat_err =%d, intsts = 0x%x", + dev_err(host->dev, "cmd_err = %d, dat_err = %d, intsts = 0x%x", cmd_err, dat_err, intsts); } @@ -1782,10 +1789,20 @@ static void msdc_init_hw(struct msdc_host *host) DATA_K_VALUE_SEL); sdr_set_bits(host->top_base + EMMC_TOP_CMD, PAD_CMD_RD_RXDLY_SEL); + if (host->tuning_step > PAD_DELAY_HALF) { + sdr_set_bits(host->top_base + EMMC_TOP_CONTROL, + PAD_DAT_RD_RXDLY2_SEL); + sdr_set_bits(host->top_base + EMMC_TOP_CMD, + PAD_CMD_RD_RXDLY2_SEL); + } } else { sdr_set_bits(host->base + tune_reg, MSDC_PAD_TUNE_RD_SEL | MSDC_PAD_TUNE_CMD_SEL); + if (host->tuning_step > PAD_DELAY_HALF) + sdr_set_bits(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST, + MSDC_PAD_TUNE_RD2_SEL | + MSDC_PAD_TUNE_CMD2_SEL); } } else { /* choose clock tune */ @@ -1927,24 +1944,24 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) msdc_set_mclk(host, ios->timing, ios->clock); } -static u32 test_delay_bit(u32 delay, u32 bit) +static u64 test_delay_bit(u64 delay, u32 bit) { - bit %= PAD_DELAY_MAX; - return delay & BIT(bit); + bit %= PAD_DELAY_FULL; + return delay & BIT_ULL(bit); } -static int get_delay_len(u32 delay, u32 start_bit) +static int get_delay_len(u64 delay, u32 start_bit) { int i; - for (i = 0; i < (PAD_DELAY_MAX - start_bit); i++) { + for (i = 0; i < (PAD_DELAY_FULL - start_bit); i++) { if (test_delay_bit(delay, start_bit + i) == 0) return i; } 
- return PAD_DELAY_MAX - start_bit; + return PAD_DELAY_FULL - start_bit; } -static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay) +static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u64 delay) { int start = 0, len = 0; int start_final = 0, len_final = 0; @@ -1952,28 +1969,28 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay) struct msdc_delay_phase delay_phase = { 0, }; if (delay == 0) { - dev_err(host->dev, "phase error: [map:%x]\n", delay); + dev_err(host->dev, "phase error: [map:%016llx]\n", delay); delay_phase.final_phase = final_phase; return delay_phase; } - while (start < PAD_DELAY_MAX) { + while (start < PAD_DELAY_FULL) { len = get_delay_len(delay, start); if (len_final < len) { start_final = start; len_final = len; } start += len ? len : 1; - if (len >= 12 && start_final < 4) + if (!upper_32_bits(delay) && len >= 12 && start_final < 4) break; } /* The rule is that to find the smallest delay cell */ if (start_final == 0) - final_phase = (start_final + len_final / 3) % PAD_DELAY_MAX; + final_phase = (start_final + len_final / 3) % PAD_DELAY_FULL; else - final_phase = (start_final + len_final / 2) % PAD_DELAY_MAX; - dev_dbg(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n", + final_phase = (start_final + len_final / 2) % PAD_DELAY_FULL; + dev_dbg(host->dev, "phase: [map:%016llx] [maxlen:%d] [final:%d]\n", delay, len_final, final_phase); delay_phase.maxlen = len_final; @@ -1986,30 +2003,64 @@ static inline void msdc_set_cmd_delay(struct msdc_host *host, u32 value) { u32 tune_reg = host->dev_comp->pad_tune_reg; - if (host->top_base) - sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, - value); - else - sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, - value); + if (host->top_base) { + if (value < PAD_DELAY_HALF) { + sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, value); + sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY2, 0); + } else { + sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, + PAD_DELAY_HALF - 1); + sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY2, + value - PAD_DELAY_HALF); + } + } else { + if (value < PAD_DELAY_HALF) { + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, value); + sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST, + MSDC_PAD_TUNE_CMDRDLY2, 0); + } else { + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, + PAD_DELAY_HALF - 1); + sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST, + MSDC_PAD_TUNE_CMDRDLY2, value - PAD_DELAY_HALF); + } + } } static inline void msdc_set_data_delay(struct msdc_host *host, u32 value) { u32 tune_reg = host->dev_comp->pad_tune_reg; - if (host->top_base) - sdr_set_field(host->top_base + EMMC_TOP_CONTROL, - PAD_DAT_RD_RXDLY, value); - else - sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, - value); + if (host->top_base) { + if (value < PAD_DELAY_HALF) { + sdr_set_field(host->top_base + EMMC_TOP_CONTROL, + PAD_DAT_RD_RXDLY, value); + sdr_set_field(host->top_base + EMMC_TOP_CONTROL, + PAD_DAT_RD_RXDLY2, 0); + } else { + sdr_set_field(host->top_base + EMMC_TOP_CONTROL, + PAD_DAT_RD_RXDLY, PAD_DELAY_HALF - 1); + sdr_set_field(host->top_base + EMMC_TOP_CONTROL, + PAD_DAT_RD_RXDLY2, value - PAD_DELAY_HALF); + } + } else { + if (value < PAD_DELAY_HALF) { + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, value); + sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST, + MSDC_PAD_TUNE_DATRRDLY2, 0); + } else { + 
sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, + PAD_DELAY_HALF - 1); + sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST, + MSDC_PAD_TUNE_DATRRDLY2, value - PAD_DELAY_HALF); + } + } } static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) { struct msdc_host *host = mmc_priv(mmc); - u32 rise_delay = 0, fall_delay = 0; + u64 rise_delay = 0, fall_delay = 0; struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; struct msdc_delay_phase internal_delay_phase; u8 final_delay, final_maxlen; @@ -2025,7 +2076,7 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) host->hs200_cmd_int_delay); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); - for (i = 0 ; i < PAD_DELAY_MAX; i++) { + for (i = 0; i < host->tuning_step; i++) { msdc_set_cmd_delay(host, i); /* * Using the same parameters, it may sometimes pass the test, @@ -2035,9 +2086,9 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) for (j = 0; j < 3; j++) { mmc_send_tuning(mmc, opcode, &cmd_err); if (!cmd_err) { - rise_delay |= BIT(i); + rise_delay |= BIT_ULL(i); } else { - rise_delay &= ~BIT(i); + rise_delay &= ~BIT_ULL(i); break; } } @@ -2049,7 +2100,7 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) goto skip_fall; sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); - for (i = 0; i < PAD_DELAY_MAX; i++) { + for (i = 0; i < host->tuning_step; i++) { msdc_set_cmd_delay(host, i); /* * Using the same parameters, it may sometimes pass the test, @@ -2059,9 +2110,9 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) for (j = 0; j < 3; j++) { mmc_send_tuning(mmc, opcode, &cmd_err); if (!cmd_err) { - fall_delay |= BIT(i); + fall_delay |= BIT_ULL(i); } else { - fall_delay &= ~BIT(i); + fall_delay &= ~BIT_ULL(i); break; } } @@ -2084,12 +2135,12 @@ skip_fall: if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay) goto skip_internal; - for (i = 0; i < PAD_DELAY_MAX; i++) { + for (i = 0; i < host->tuning_step; i++) { sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY, i); mmc_send_tuning(mmc, opcode, &cmd_err); if (!cmd_err) - internal_delay |= BIT(i); + internal_delay |= BIT_ULL(i); } dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay); internal_delay_phase = get_best_delay(host, internal_delay); @@ -2123,7 +2174,8 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode) sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); else sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); - for (i = 0 ; i < PAD_DELAY_MAX; i++) { + + for (i = 0; i < PAD_DELAY_HALF; i++) { sdr_set_field(host->base + PAD_CMD_TUNE, PAD_CMD_TUNE_RX_DLY3, i); /* @@ -2153,7 +2205,7 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode) static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) { struct msdc_host *host = mmc_priv(mmc); - u32 rise_delay = 0, fall_delay = 0; + u64 rise_delay = 0, fall_delay = 0; struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; u8 final_delay, final_maxlen; int i, ret; @@ -2162,11 +2214,11 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) host->latch_ck); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); - for (i = 0 ; i < PAD_DELAY_MAX; i++) { + for (i = 0; i < host->tuning_step; i++) { msdc_set_data_delay(host, i); ret = mmc_send_tuning(mmc, opcode, NULL); if (!ret) - rise_delay |= BIT(i); + rise_delay |= BIT_ULL(i); } final_rise_delay = get_best_delay(host, rise_delay); /* if rising 
edge has enough margin, then do not scan falling edge */ @@ -2176,11 +2228,11 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); - for (i = 0; i < PAD_DELAY_MAX; i++) { + for (i = 0; i < host->tuning_step; i++) { msdc_set_data_delay(host, i); ret = mmc_send_tuning(mmc, opcode, NULL); if (!ret) - fall_delay |= BIT(i); + fall_delay |= BIT_ULL(i); } final_fall_delay = get_best_delay(host, fall_delay); @@ -2208,7 +2260,7 @@ skip_fall: static int msdc_tune_together(struct mmc_host *mmc, u32 opcode) { struct msdc_host *host = mmc_priv(mmc); - u32 rise_delay = 0, fall_delay = 0; + u64 rise_delay = 0, fall_delay = 0; struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; u8 final_delay, final_maxlen; int i, ret; @@ -2219,12 +2271,12 @@ static int msdc_tune_together(struct mmc_host *mmc, u32 opcode) sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); - for (i = 0 ; i < PAD_DELAY_MAX; i++) { + for (i = 0; i < host->tuning_step; i++) { msdc_set_cmd_delay(host, i); msdc_set_data_delay(host, i); ret = mmc_send_tuning(mmc, opcode, NULL); if (!ret) - rise_delay |= BIT(i); + rise_delay |= BIT_ULL(i); } final_rise_delay = get_best_delay(host, rise_delay); /* if rising edge has enough margin, then do not scan falling edge */ @@ -2235,12 +2287,12 @@ static int msdc_tune_together(struct mmc_host *mmc, u32 opcode) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); - for (i = 0; i < PAD_DELAY_MAX; i++) { + for (i = 0; i < host->tuning_step; i++) { msdc_set_cmd_delay(host, i); msdc_set_data_delay(host, i); ret = mmc_send_tuning(mmc, opcode, NULL); if (!ret) - fall_delay |= BIT(i); + fall_delay |= BIT_ULL(i); } final_fall_delay = get_best_delay(host, fall_delay); @@ -2348,7 +2400,7 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card } host->hs400_tuning = true; - for (i = 0; i < PAD_DELAY_MAX; i++) { + for (i = 0; i < PAD_DELAY_HALF; i++) { if (host->top_base) sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE, PAD_DS_DLY1, i); @@ -2582,6 +2634,8 @@ static const struct cqhci_host_ops msdc_cmdq_ops = { static void msdc_of_property_parse(struct platform_device *pdev, struct msdc_host *host) { + struct mmc_host *mmc = mmc_from_priv(host); + of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck", &host->latch_ck); @@ -2603,6 +2657,14 @@ static void msdc_of_property_parse(struct platform_device *pdev, else host->hs400_cmd_resp_sel_rising = false; + if (of_property_read_u32(pdev->dev.of_node, "mediatek,tuning-step", + &host->tuning_step)) { + if (mmc->caps2 & MMC_CAP2_NO_MMC) + host->tuning_step = PAD_DELAY_FULL; + else + host->tuning_step = PAD_DELAY_HALF; + } + if (of_property_read_bool(pdev->dev.of_node, "supports-cqe")) host->cqhci = true; -- cgit From 84a6be7db9050dd2601c9870f65eab9a665d2d5d Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 8 Dec 2023 00:19:01 +0200 Subject: mmc: mmc_spi: remove custom DMA mapped buffers There is no need to duplicate what SPI core or individual controller drivers already do, i.e. mapping the buffers for DMA capable transfers. Note, that the code, besides its redundancy, was buggy: strictly speaking there is no guarantee, while it's true for those which can use this code (see below), that the SPI host controller _is_ the device which does DMA. 
Also see the Link tags below. Additional notes. Currently only two SPI host controller drivers may use premapped (by the user) DMA buffers: - drivers/spi/spi-au1550.c - drivers/spi/spi-fsl-spi.c Both of them have DMA mapping support code. I don't expect that SPI host controller code is worse than what has been done in mmc_spi. Hence I do not expect any regressions here. Otherwise, I'm pretty much sure these regressions have to be fixed in the respective drivers, and not here. That said, remove all related pieces of DMA mapping code from mmc_spi. Link: https://lore.kernel.org/linux-mmc/c73b9ba9-1699-2aff-e2fd-b4b4f292a3ca@raspberrypi.org/ Link: https://stackoverflow.com/questions/67620728/mmc-spi-issue-not-able-to-setup-mmc-sd-card-in-linux Signed-off-by: Andy Shevchenko Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20231207221901.3259962-1-andriy.shevchenko@linux.intel.com Signed-off-by: Ulf Hansson --- drivers/mmc/host/mmc_spi.c | 186 ++------------------------------------------- 1 file changed, 5 insertions(+), 181 deletions(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index cc333ad67cac..2a99ffb61f8c 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include @@ -119,19 +119,14 @@ struct mmc_spi_host { struct spi_transfer status; struct spi_message readback; - /* underlying DMA-aware controller, or null */ - struct device *dma_dev; - /* buffer used for commands and for message "overhead" */ struct scratch *data; - dma_addr_t data_dma; /* Specs say to write ones most of the time, even when the card * has no need to read its input data; and many cards won't care. * This is our source of those ones. */ void *ones; - dma_addr_t ones_dma; }; @@ -147,11 +142,8 @@ static inline int mmc_cs_off(struct mmc_spi_host *host) return spi_setup(host->spi); } -static int -mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len) +static int mmc_spi_readbytes(struct mmc_spi_host *host, unsigned int len) { - int status; - if (len > sizeof(*host->data)) { WARN_ON(1); return -EIO; @@ -159,19 +151,7 @@ mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len) host->status.len = len; - if (host->dma_dev) - dma_sync_single_for_device(host->dma_dev, - host->data_dma, sizeof(*host->data), - DMA_FROM_DEVICE); - - status = spi_sync_locked(host->spi, &host->readback); - - if (host->dma_dev) - dma_sync_single_for_cpu(host->dma_dev, - host->data_dma, sizeof(*host->data), - DMA_FROM_DEVICE); - - return status; + return spi_sync_locked(host->spi, &host->readback); } static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout, @@ -506,23 +486,11 @@ mmc_spi_command_send(struct mmc_spi_host *host, t = &host->t; memset(t, 0, sizeof(*t)); t->tx_buf = t->rx_buf = data->status; - t->tx_dma = t->rx_dma = host->data_dma; t->len = cp - data->status; t->cs_change = 1; spi_message_add_tail(t, &host->m); - if (host->dma_dev) { - host->m.is_dma_mapped = 1; - dma_sync_single_for_device(host->dma_dev, - host->data_dma, sizeof(*host->data), - DMA_BIDIRECTIONAL); - } status = spi_sync_locked(host->spi, &host->m); - - if (host->dma_dev) - dma_sync_single_for_cpu(host->dma_dev, - host->data_dma, sizeof(*host->data), - DMA_BIDIRECTIONAL); if (status < 0) { dev_dbg(&host->spi->dev, " ... write returned %d\n", status); cmd->error = status; @@ -540,9 +508,6 @@ mmc_spi_command_send(struct mmc_spi_host *host, * We always provide TX data for data and CRC. 
The MMC/SD protocol * requires us to write ones; but Linux defaults to writing zeroes; * so we explicitly initialize it to all ones on RX paths. - * - * We also handle DMA mapping, so the underlying SPI controller does - * not need to (re)do it for each message. */ static void mmc_spi_setup_data_message( @@ -552,11 +517,8 @@ mmc_spi_setup_data_message( { struct spi_transfer *t; struct scratch *scratch = host->data; - dma_addr_t dma = host->data_dma; spi_message_init(&host->m); - if (dma) - host->m.is_dma_mapped = 1; /* for reads, readblock() skips 0xff bytes before finding * the token; for writes, this transfer issues that token. @@ -570,8 +532,6 @@ mmc_spi_setup_data_message( else scratch->data_token = SPI_TOKEN_SINGLE; t->tx_buf = &scratch->data_token; - if (dma) - t->tx_dma = dma + offsetof(struct scratch, data_token); spi_message_add_tail(t, &host->m); } @@ -581,7 +541,6 @@ mmc_spi_setup_data_message( t = &host->t; memset(t, 0, sizeof(*t)); t->tx_buf = host->ones; - t->tx_dma = host->ones_dma; /* length and actual buffer info are written later */ spi_message_add_tail(t, &host->m); @@ -591,14 +550,9 @@ mmc_spi_setup_data_message( if (direction == DMA_TO_DEVICE) { /* the actual CRC may get written later */ t->tx_buf = &scratch->crc_val; - if (dma) - t->tx_dma = dma + offsetof(struct scratch, crc_val); } else { t->tx_buf = host->ones; - t->tx_dma = host->ones_dma; t->rx_buf = &scratch->crc_val; - if (dma) - t->rx_dma = dma + offsetof(struct scratch, crc_val); } spi_message_add_tail(t, &host->m); @@ -621,10 +575,7 @@ mmc_spi_setup_data_message( memset(t, 0, sizeof(*t)); t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1; t->tx_buf = host->ones; - t->tx_dma = host->ones_dma; t->rx_buf = scratch->status; - if (dma) - t->rx_dma = dma + offsetof(struct scratch, status); t->cs_change = 1; spi_message_add_tail(t, &host->m); } @@ -653,23 +604,13 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t, if (host->mmc->use_spi_crc) scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len)); - if (host->dma_dev) - dma_sync_single_for_device(host->dma_dev, - host->data_dma, sizeof(*scratch), - DMA_BIDIRECTIONAL); status = spi_sync_locked(spi, &host->m); - if (status != 0) { dev_dbg(&spi->dev, "write error (%d)\n", status); return status; } - if (host->dma_dev) - dma_sync_single_for_cpu(host->dma_dev, - host->data_dma, sizeof(*scratch), - DMA_BIDIRECTIONAL); - /* * Get the transmission data-response reply. It must follow * immediately after the data block we transferred. This reply @@ -718,8 +659,6 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t, } t->tx_buf += t->len; - if (host->dma_dev) - t->tx_dma += t->len; /* Return when not busy. If we didn't collect that status yet, * we'll need some more I/O. 
@@ -783,30 +722,12 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t, } leftover = status << 1; - if (host->dma_dev) { - dma_sync_single_for_device(host->dma_dev, - host->data_dma, sizeof(*scratch), - DMA_BIDIRECTIONAL); - dma_sync_single_for_device(host->dma_dev, - t->rx_dma, t->len, - DMA_FROM_DEVICE); - } - status = spi_sync_locked(spi, &host->m); if (status < 0) { dev_dbg(&spi->dev, "read error %d\n", status); return status; } - if (host->dma_dev) { - dma_sync_single_for_cpu(host->dma_dev, - host->data_dma, sizeof(*scratch), - DMA_BIDIRECTIONAL); - dma_sync_single_for_cpu(host->dma_dev, - t->rx_dma, t->len, - DMA_FROM_DEVICE); - } - if (bitshift) { /* Walk through the data and the crc and do * all the magic to get byte-aligned data. @@ -841,8 +762,6 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t, } t->rx_buf += t->len; - if (host->dma_dev) - t->rx_dma += t->len; return 0; } @@ -857,7 +776,6 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd, struct mmc_data *data, u32 blk_size) { struct spi_device *spi = host->spi; - struct device *dma_dev = host->dma_dev; struct spi_transfer *t; enum dma_data_direction direction = mmc_get_dma_dir(data); struct scatterlist *sg; @@ -884,31 +802,8 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd, */ for_each_sg(data->sg, sg, data->sg_len, n_sg) { int status = 0; - dma_addr_t dma_addr = 0; void *kmap_addr; unsigned length = sg->length; - enum dma_data_direction dir = direction; - - /* set up dma mapping for controller drivers that might - * use DMA ... though they may fall back to PIO - */ - if (dma_dev) { - /* never invalidate whole *shared* pages ... */ - if ((sg->offset != 0 || length != PAGE_SIZE) - && dir == DMA_FROM_DEVICE) - dir = DMA_BIDIRECTIONAL; - - dma_addr = dma_map_page(dma_dev, sg_page(sg), 0, - PAGE_SIZE, dir); - if (dma_mapping_error(dma_dev, dma_addr)) { - data->error = -EFAULT; - break; - } - if (direction == DMA_TO_DEVICE) - t->tx_dma = dma_addr + sg->offset; - else - t->rx_dma = dma_addr + sg->offset; - } /* allow pio too; we don't allow highmem */ kmap_addr = kmap(sg_page(sg)); @@ -941,8 +836,6 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd, if (direction == DMA_FROM_DEVICE) flush_dcache_page(sg_page(sg)); kunmap(sg_page(sg)); - if (dma_dev) - dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir); if (status < 0) { data->error = status; @@ -977,21 +870,9 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd, scratch->status[0] = SPI_TOKEN_STOP_TRAN; host->early_status.tx_buf = host->early_status.rx_buf; - host->early_status.tx_dma = host->early_status.rx_dma; host->early_status.len = statlen; - if (host->dma_dev) - dma_sync_single_for_device(host->dma_dev, - host->data_dma, sizeof(*scratch), - DMA_BIDIRECTIONAL); - tmp = spi_sync_locked(spi, &host->m); - - if (host->dma_dev) - dma_sync_single_for_cpu(host->dma_dev, - host->data_dma, sizeof(*scratch), - DMA_BIDIRECTIONAL); - if (tmp < 0) { if (!data->error) data->error = tmp; @@ -1265,52 +1146,6 @@ mmc_spi_detect_irq(int irq, void *mmc) return IRQ_HANDLED; } -#ifdef CONFIG_HAS_DMA -static int mmc_spi_dma_alloc(struct mmc_spi_host *host) -{ - struct spi_device *spi = host->spi; - struct device *dev; - - if (!spi->master->dev.parent->dma_mask) - return 0; - - dev = spi->master->dev.parent; - - host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, host->ones_dma)) - return -ENOMEM; - - host->data_dma = 
dma_map_single(dev, host->data, sizeof(*host->data), - DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev, host->data_dma)) { - dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE, - DMA_TO_DEVICE); - return -ENOMEM; - } - - dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data), - DMA_BIDIRECTIONAL); - - host->dma_dev = dev; - return 0; -} - -static void mmc_spi_dma_free(struct mmc_spi_host *host) -{ - if (!host->dma_dev) - return; - - dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE, - DMA_TO_DEVICE); - dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data), - DMA_BIDIRECTIONAL); -} -#else -static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; } -static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {} -#endif - static int mmc_spi_probe(struct spi_device *spi) { void *ones; @@ -1402,24 +1237,17 @@ static int mmc_spi_probe(struct spi_device *spi) host->powerup_msecs = 250; } - /* preallocate dma buffers */ + /* Preallocate buffers */ host->data = kmalloc(sizeof(*host->data), GFP_KERNEL); if (!host->data) goto fail_nobuf1; - status = mmc_spi_dma_alloc(host); - if (status) - goto fail_dma; - /* setup message for status/busy readback */ spi_message_init(&host->readback); - host->readback.is_dma_mapped = (host->dma_dev != NULL); spi_message_add_tail(&host->status, &host->readback); host->status.tx_buf = host->ones; - host->status.tx_dma = host->ones_dma; host->status.rx_buf = &host->data->status; - host->status.rx_dma = host->data_dma + offsetof(struct scratch, status); host->status.cs_change = 1; /* register card detect irq */ @@ -1464,9 +1292,8 @@ static int mmc_spi_probe(struct spi_device *spi) if (!status) has_ro = true; - dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n", + dev_info(&spi->dev, "SD/MMC host %s%s%s%s\n", dev_name(&mmc->class_dev), - host->dma_dev ? "" : ", no DMA", has_ro ? "" : ", no WP", (host->pdata && host->pdata->setpower) ? "" : ", no poweroff", @@ -1477,8 +1304,6 @@ static int mmc_spi_probe(struct spi_device *spi) fail_gpiod_request: mmc_remove_host(mmc); fail_glue_init: - mmc_spi_dma_free(host); -fail_dma: kfree(host->data); fail_nobuf1: mmc_spi_put_pdata(spi); @@ -1500,7 +1325,6 @@ static void mmc_spi_remove(struct spi_device *spi) mmc_remove_host(mmc); - mmc_spi_dma_free(host); kfree(host->data); kfree(host->ones); -- cgit From b062136d0d6f46d7ad5c88219cbd75f90cb18e81 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 14 Dec 2023 11:09:02 +0200 Subject: mmc: mmc_test: Add re-tuning test Add a test to repeatedly re-tune in between random reads. The test is non-destructive of data on the card and runs for 30 seconds. It can be repeated to test for longer durations. If re-tuning is not supported, the test is skipped. Example: # echo 'mmc1:0001' > /sys/bus/mmc/drivers/mmcblk/unbind # echo 'mmc1:0001' > /sys/bus/mmc/drivers/mmc_test/bind [ 36.642257] mmc_test mmc1:0001: Card claimed for testing. # cat /sys/kernel/debug/mmc1/mmc1\:0001/testlist | grep tuning 52: Re-tuning reliability # echo 52 > /sys/kernel/debug/mmc1/mmc1\:0001/test [ 91.522555] mmc1: Starting tests of card mmc1:0001... [ 91.528425] mmc1: Test case 52. Re-tuning reliability... [ 121.536682] mmc1: Result: OK [ 121.539572] mmc1: Tests completed. 
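Since each run of test 52 lasts a fixed 30 seconds, longer soak coverage is simply a matter of re-triggering it. A minimal shell sketch, assuming the same mmc1:0001 card and debugfs path as in the example above (adjust both for your host):

# for i in $(seq 1 10); do echo 52 > /sys/kernel/debug/mmc1/mmc1\:0001/test; done

Ten back-to-back runs give roughly five minutes of repeated re-tuning under random reads, since each write should return only once that pass has completed.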
Signed-off-by: Adrian Hunter Link: https://lore.kernel.org/r/20231214090902.43628-1-adrian.hunter@intel.com Signed-off-by: Ulf Hansson --- drivers/mmc/core/mmc_test.c | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c index 0f6a563103fd..8f7f587a0025 100644 --- a/drivers/mmc/core/mmc_test.c +++ b/drivers/mmc/core/mmc_test.c @@ -1904,7 +1904,7 @@ static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt) } static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print, - unsigned long sz) + unsigned long sz, int secs, int force_retuning) { unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea; unsigned int ssz; @@ -1921,7 +1921,7 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print, for (cnt = 0; cnt < UINT_MAX; cnt++) { ktime_get_ts64(&ts2); ts = timespec64_sub(ts2, ts1); - if (ts.tv_sec >= 10) + if (ts.tv_sec >= secs) break; ea = mmc_test_rnd_num(range1); if (ea == last_ea) @@ -1929,6 +1929,8 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print, last_ea = ea; dev_addr = rnd_addr + test->card->pref_erase * ea + ssz * mmc_test_rnd_num(range2); + if (force_retuning) + mmc_retune_needed(test->card->host); ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0); if (ret) return ret; @@ -1953,24 +1955,35 @@ static int mmc_test_random_perf(struct mmc_test_card *test, int write) */ if (write) { next = rnd_next; - ret = mmc_test_rnd_perf(test, write, 0, sz); + ret = mmc_test_rnd_perf(test, write, 0, sz, 10, 0); if (ret) return ret; rnd_next = next; } - ret = mmc_test_rnd_perf(test, write, 1, sz); + ret = mmc_test_rnd_perf(test, write, 1, sz, 10, 0); if (ret) return ret; } sz = t->max_tfr; if (write) { next = rnd_next; - ret = mmc_test_rnd_perf(test, write, 0, sz); + ret = mmc_test_rnd_perf(test, write, 0, sz, 10, 0); if (ret) return ret; rnd_next = next; } - return mmc_test_rnd_perf(test, write, 1, sz); + return mmc_test_rnd_perf(test, write, 1, sz, 10, 0); +} + +static int mmc_test_retuning(struct mmc_test_card *test) +{ + if (!mmc_can_retune(test->card->host)) { + pr_info("%s: No retuning - test skipped\n", + mmc_hostname(test->card->host)); + return RESULT_UNSUP_HOST; + } + + return mmc_test_rnd_perf(test, 0, 0, 8192, 30, 1); } /* @@ -2921,6 +2934,14 @@ static const struct mmc_test_case mmc_test_cases[] = { .run = mmc_test_cmds_during_write_cmd23_nonblock, .cleanup = mmc_test_area_cleanup, }, + + { + .name = "Re-tuning reliability", + .prepare = mmc_test_area_prepare, + .run = mmc_test_retuning, + .cleanup = mmc_test_area_cleanup, + }, + }; static DEFINE_MUTEX(mmc_test_lock); -- cgit From e4df56ad0bf3506c5189abb9be83f3bea05a4c4f Mon Sep 17 00:00:00 2001 From: Lin Gui Date: Tue, 19 Dec 2023 07:05:32 +0800 Subject: mmc: core: Add wp_grp_size sysfs node The eMMC card can be set into write-protected mode to prevent data from being accidentally modified or deleted. Wp_grp_size (Write Protect Group Size) refers to an attribute of the eMMC card, used to manage write protection and is the CSD register [36:32] of the eMMC device. Wp_grp_size (Write Protect Group Size) indicates how many eMMC blocks are contained in each write protection group on the eMMC card. To allow userspace easy access of the CSD register bits, let's add sysfs node "wp_grp_size". 
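Once the attribute is in place, userspace can read the write-protect group size (reported in bytes, i.e. the group size in 512-byte blocks shifted left by 9) straight from the card's sysfs directory. A minimal sketch with a hypothetical device name and output value; the real figure depends on the card's CSD/EXT_CSD:

# cat /sys/bus/mmc/devices/mmc1:0001/wp_grp_size
4194304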
Signed-off-by: Lin Gui Signed-off-by: Bo Ye Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20231218230532.82427-1-bo.ye@mediatek.com Signed-off-by: Ulf Hansson --- drivers/mmc/core/mmc.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 7b996db570c9..adf7b70cf1b6 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -136,6 +136,17 @@ static void mmc_set_erase_size(struct mmc_card *card) mmc_init_erase(card); } + +static void mmc_set_wp_grp_size(struct mmc_card *card) +{ + if (card->ext_csd.erase_group_def & 1) + card->wp_grp_size = card->ext_csd.hc_erase_size * + card->ext_csd.raw_hc_erase_gap_size; + else + card->wp_grp_size = card->csd.erase_size * + (card->csd.wp_grp_size + 1); +} + /* * Given a 128-bit response, decode to our card CSD structure. */ @@ -186,6 +197,7 @@ static int mmc_decode_csd(struct mmc_card *card) b = UNSTUFF_BITS(resp, 37, 5); csd->erase_size = (a + 1) * (b + 1); csd->erase_size <<= csd->write_blkbits - 9; + csd->wp_grp_size = UNSTUFF_BITS(resp, 32, 5); } return 0; @@ -785,6 +797,7 @@ MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1], MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year); MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9); MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9); +MMC_DEV_ATTR(wp_grp_size, "%u\n", card->wp_grp_size << 9); MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable); MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev); MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); @@ -845,6 +858,7 @@ static struct attribute *mmc_std_attrs[] = { &dev_attr_date.attr, &dev_attr_erase_size.attr, &dev_attr_preferred_erase_size.attr, + &dev_attr_wp_grp_size.attr, &dev_attr_fwrev.attr, &dev_attr_ffu_capable.attr, &dev_attr_hwrev.attr, @@ -1759,7 +1773,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, mmc_set_erase_size(card); } } - + mmc_set_wp_grp_size(card); /* * Ensure eMMC user default partition is enabled */ -- cgit From cb052da7f031b0d2309a4895ca236afb3b4bbf50 Mon Sep 17 00:00:00 2001 From: Peter Robinson Date: Wed, 20 Dec 2023 13:59:46 +0000 Subject: mmc: sdhci_am654: Fix TI SoC dependencies The sdhci_am654 is specific to recent TI SoCs, update the dependencies for those SoCs and compile testing. While we're at it update the text to reflect the wider range of supported TI SoCS the driver now supports. Fixes: 41fd4caeb00b ("mmc: sdhci_am654: Add Initial Support for AM654 SDHCI driver") Signed-off-by: Peter Robinson Link: https://lore.kernel.org/r/20231220135950.433588-1-pbrobinson@gmail.com Signed-off-by: Ulf Hansson --- drivers/mmc/host/Kconfig | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 58bd5fe4cd25..24ce5576b61a 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -1041,14 +1041,15 @@ config MMC_SDHCI_OMAP config MMC_SDHCI_AM654 tristate "Support for the SDHCI Controller in TI's AM654 SOCs" + depends on ARCH_K3 || COMPILE_TEST depends on MMC_SDHCI_PLTFM && OF select MMC_SDHCI_IO_ACCESSORS select MMC_CQHCI select REGMAP_MMIO help This selects the Secure Digital Host Controller Interface (SDHCI) - support present in TI's AM654 SOCs. The controller supports - SD/MMC/SDIO devices. + support present in TI's AM65x/AM64x/AM62x/J721E SOCs. The controller + supports SD/MMC/SDIO devices. 
If you have a controller with this interface, say Y or M here. -- cgit From 09f164d393a6671e5ff8342ba6b3cb7fe3f20208 Mon Sep 17 00:00:00 2001 From: Peter Robinson Date: Wed, 20 Dec 2023 13:59:47 +0000 Subject: mmc: sdhci_omap: Fix TI SoC dependencies The sdhci_omap is specific to older TI SoCs, update the dependencies for those SoCs and compile testing. While we're at it update the text to reflect the wider range of supported TI SoCS the driver now supports. Fixes: 7d326930d352 ("mmc: sdhci-omap: Add OMAP SDHCI driver") Signed-off-by: Peter Robinson Link: https://lore.kernel.org/r/20231220135950.433588-2-pbrobinson@gmail.com Signed-off-by: Ulf Hansson --- drivers/mmc/host/Kconfig | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 24ce5576b61a..81f2c4e05287 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -1026,14 +1026,15 @@ config MMC_SDHCI_XENON config MMC_SDHCI_OMAP tristate "TI SDHCI Controller Support" + depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST depends on MMC_SDHCI_PLTFM && OF select THERMAL imply TI_SOC_THERMAL select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE help This selects the Secure Digital Host Controller Interface (SDHCI) - support present in TI's DRA7 SOCs. The controller supports - SD/MMC/SDIO devices. + support present in TI's Keystone/OMAP2+/DRA7 SOCs. The controller + supports SD/MMC/SDIO devices. If you have a controller with this interface, say Y or M here. -- cgit From 77e01b49e35f24ebd1659096d5fc5c3b75975545 Mon Sep 17 00:00:00 2001 From: Mengqi Zhang Date: Mon, 25 Dec 2023 17:38:40 +0800 Subject: mmc: core: Add HS400 tuning in HS400es initialization During the initialization to HS400es stage, add a HS400 tuning flow as an optional process. For Mediatek IP, the HS400es mode requires a specific tuning to ensure the correct HS400 timing setting. Signed-off-by: Mengqi Zhang Link: https://lore.kernel.org/r/20231225093839.22931-2-mengqi.zhang@mediatek.com Signed-off-by: Ulf Hansson --- drivers/mmc/core/mmc.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index adf7b70cf1b6..f410bee50132 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1831,8 +1831,13 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, if (err) goto free_card; - - } else if (!mmc_card_hs400es(card)) { + } else if (mmc_card_hs400es(card)) { + if (host->ops->execute_hs400_tuning) { + err = host->ops->execute_hs400_tuning(host, card); + if (err) + goto free_card; + } + } else { /* Select the desired bus width optionally */ err = mmc_select_bus_width(card); if (err > 0 && mmc_card_hs(card)) { -- cgit From 67380251e8bbd3302c64fea07f95c31971b91c22 Mon Sep 17 00:00:00 2001 From: Jorge Ramirez-Ortiz Date: Wed, 3 Jan 2024 12:29:11 +0100 Subject: mmc: core: Do not force a retune before RPMB switch Requesting a retune before switching to the RPMB partition has been observed to cause CRC errors on the RPMB reads (-EILSEQ). Since RPMB reads can not be retried, the clients would be directly affected by the errors. This commit disables the retune request prior to switching to the RPMB partition: mmc_retune_pause() no longer triggers a retune before the pause period begins. This was verified with the sdhci-of-arasan driver (ZynqMP) configured for HS200 using two separate eMMC cards (DG4064 and 064GB2). 
In both cases, the error was easy to reproduce, triggering every few tens of reads.

With this commit, systems that use OP-TEE to access RPMB variables will see improved performance. Specifically, when OP-TEE is configured to employ RPMB as a secure storage solution, it stores not only the data but also the secure filesystem within the partition. As a result, retrieving any variable involves multiple RPMB reads, typically around five. For context, on ZynqMP each retune request consumed approximately 8ms. Consequently, reading any RPMB variable used to take at the very least 40ms. After dropping the need to retune before switching to the RPMB partition, this is no longer the case.

Signed-off-by: Jorge Ramirez-Ortiz
Acked-by: Avri Altman
Acked-by: Adrian Hunter
Link: https://lore.kernel.org/r/20240103112911.2954632-1-jorge@foundries.io
Signed-off-by: Ulf Hansson
---
 drivers/mmc/core/host.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'drivers/mmc')

diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 2f51db4df1a8..cf396e8f34e9 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -119,13 +119,12 @@ void mmc_retune_enable(struct mmc_host *host)
 
 /*
  * Pause re-tuning for a small set of operations. The pause begins after the
- * next command and after first doing re-tuning.
+ * next command.
  */
 void mmc_retune_pause(struct mmc_host *host)
 {
 	if (!host->retune_paused) {
 		host->retune_paused = 1;
-		mmc_retune_needed(host);
 		mmc_retune_hold(host);
 	}
 }
--
cgit

From fe86da368a1b751adec1776369cb15cd0aae0f10 Mon Sep 17 00:00:00 2001
From: Kamal Dasu
Date: Wed, 3 Jan 2024 17:23:38 -0500
Subject: mmc: sdhci-brcmstb: add new sdhci reset sequence for brcm 74165b0

74165b0 uses a new SDIO controller core version which requires a different reset sequence. For the core reset we keep using sdhci_reset. For CMD and/or DATA resets, add a new function that also enables the SDHCI clocks SDHCI_CLOCK_CARD_EN and SDHCI_CLOCK_INT_EN along with the SDHCI_RESET_CMD and/or SDHCI_RESET_DATA fields.
Signed-off-by: Kamal Dasu Acked-by: Adrian Hunter Link: https://lore.kernel.org/r/20240103222338.31447-3-kamal.dasu@broadcom.com Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-brcmstb.c | 69 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 64 insertions(+), 5 deletions(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index c23251bb95f3..9053526fa212 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -6,6 +6,7 @@ */ #include +#include #include #include #include @@ -44,8 +45,13 @@ struct brcmstb_match_priv { static inline void enable_clock_gating(struct sdhci_host *host) { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host); u32 reg; + if (!(priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK)) + return; + reg = sdhci_readl(host, SDHCI_VENDOR); reg |= SDHCI_VENDOR_GATE_SDCLK_EN; sdhci_writel(host, reg, SDHCI_VENDOR); @@ -53,14 +59,53 @@ static inline void enable_clock_gating(struct sdhci_host *host) static void brcmstb_reset(struct sdhci_host *host, u8 mask) { - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host); - sdhci_and_cqhci_reset(host, mask); /* Reset will clear this, so re-enable it */ - if (priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK) - enable_clock_gating(host); + enable_clock_gating(host); +} + +static void brcmstb_sdhci_reset_cmd_data(struct sdhci_host *host, u8 mask) +{ + u32 new_mask = (mask & (SDHCI_RESET_CMD | SDHCI_RESET_DATA)) << 24; + int ret; + u32 reg; + + /* + * SDHCI_CLOCK_CONTROL register CARD_EN and CLOCK_INT_EN bits shall + * be set along with SOFTWARE_RESET register RESET_CMD or RESET_DATA + * bits, hence access SDHCI_CLOCK_CONTROL register as 32-bit register + */ + new_mask |= SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN; + reg = sdhci_readl(host, SDHCI_CLOCK_CONTROL); + sdhci_writel(host, reg | new_mask, SDHCI_CLOCK_CONTROL); + + reg = sdhci_readb(host, SDHCI_SOFTWARE_RESET); + + ret = read_poll_timeout_atomic(sdhci_readb, reg, !(reg & mask), + 10, 10000, false, + host, SDHCI_SOFTWARE_RESET); + + if (ret) { + pr_err("%s: Reset 0x%x never completed.\n", + mmc_hostname(host->mmc), (int)mask); + sdhci_err_stats_inc(host, CTRL_TIMEOUT); + sdhci_dumpregs(host); + } +} + +static void brcmstb_reset_74165b0(struct sdhci_host *host, u8 mask) +{ + /* take care of RESET_ALL as usual */ + if (mask & SDHCI_RESET_ALL) + sdhci_and_cqhci_reset(host, SDHCI_RESET_ALL); + + /* cmd and/or data treated differently on this core */ + if (mask & (SDHCI_RESET_CMD | SDHCI_RESET_DATA)) + brcmstb_sdhci_reset_cmd_data(host, mask); + + /* Reset will clear this, so re-enable it */ + enable_clock_gating(host); } static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios) @@ -162,6 +207,13 @@ static struct sdhci_ops sdhci_brcmstb_ops_7216 = { .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling, }; +static struct sdhci_ops sdhci_brcmstb_ops_74165b0 = { + .set_clock = sdhci_brcmstb_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = brcmstb_reset_74165b0, + .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling, +}; + static struct brcmstb_match_priv match_priv_7425 = { .flags = BRCMSTB_MATCH_FLAGS_NO_64BIT | BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT, @@ -179,10 +231,17 @@ static const struct brcmstb_match_priv match_priv_7216 = { .ops = &sdhci_brcmstb_ops_7216, }; +static struct brcmstb_match_priv match_priv_74165b0 = { + .flags = 
BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE, + .hs400es = sdhci_brcmstb_hs400es, + .ops = &sdhci_brcmstb_ops_74165b0, +}; + static const struct of_device_id __maybe_unused sdhci_brcm_of_match[] = { { .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 }, { .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 }, { .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 }, + { .compatible = "brcm,bcm74165b0-sdhci", .data = &match_priv_74165b0 }, {}, }; -- cgit From 5d40213347480e3ab903d5438dbd0d6b0110e6b8 Mon Sep 17 00:00:00 2001 From: Elad Nachman Date: Thu, 4 Jan 2024 19:30:33 +0200 Subject: mmc: xenon: Add ac5 support via bounce buffer AC5/X/IM SOCs has a variant of the Xenon eMMC controller, in which only 31-bit of addressing pass from the controller on the AXI bus. Since we cannot guarantee that only buffers from the first 2GB of memory will reach the driver, the driver is configured for SDMA mode, without 64-bit mode, overriding the DMA mask to 34-bit to support the DDR memory mapping, which starts at offset 8GB. Signed-off-by: Elad Nachman Acked-by: Adrian Hunter Link: https://lore.kernel.org/r/20240104173033.2836110-1-enachman@marvell.com Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-xenon.c | 31 +++++++++++++++++++++++++++++++ drivers/mmc/host/sdhci-xenon.h | 3 ++- 2 files changed, 33 insertions(+), 1 deletion(-) (limited to 'drivers/mmc') diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 25ba7aecc3be..0e52867f6e91 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include "sdhci-pltfm.h" #include "sdhci-xenon.h" @@ -422,6 +424,7 @@ static int xenon_probe_params(struct platform_device *pdev) struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); u32 sdhc_id, nr_sdhc; u32 tuning_count; + struct sysinfo si; /* Disable HS200 on Armada AP806 */ if (priv->hw_version == XENON_AP806) @@ -450,6 +453,23 @@ static int xenon_probe_params(struct platform_device *pdev) } priv->tuning_count = tuning_count; + /* + * AC5/X/IM HW has only 31-bits passed in the crossbar switch. + * If we have more than 2GB of memory, this means we might pass + * memory pointers which are above 2GB and which cannot be properly + * represented. In this case, disable ADMA, 64-bit DMA and allow only SDMA. + * This effectively will enable bounce buffer quirk in the + * generic SDHCI driver, which will make sure DMA is only done + * from supported memory regions: + */ + if (priv->hw_version == XENON_AC5) { + si_meminfo(&si); + if (si.totalram * si.mem_unit > SZ_2G) { + host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; + host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA; + } + } + return xenon_phy_parse_params(dev, host); } @@ -562,6 +582,16 @@ static int xenon_probe(struct platform_device *pdev) goto remove_sdhc; pm_runtime_put_autosuspend(&pdev->dev); + /* + * If we previously detected AC5 with over 2GB of memory, + * then we disable ADMA and 64-bit DMA. + * This means generic SDHCI driver has set the DMA mask to + * 32-bit. 
Since DDR starts at 0x2_0000_0000, we must use + * 34-bit DMA mask to access this DDR memory: + */ + if (priv->hw_version == XENON_AC5 && + host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(34)); return 0; @@ -680,6 +710,7 @@ static const struct of_device_id sdhci_xenon_dt_ids[] = { { .compatible = "marvell,armada-ap807-sdhci", .data = (void *)XENON_AP807}, { .compatible = "marvell,armada-cp110-sdhci", .data = (void *)XENON_CP110}, { .compatible = "marvell,armada-3700-sdhci", .data = (void *)XENON_A3700}, + { .compatible = "marvell,ac5-sdhci", .data = (void *)XENON_AC5}, {} }; MODULE_DEVICE_TABLE(of, sdhci_xenon_dt_ids); diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h index 3e9c6c908a79..0460d97aad26 100644 --- a/drivers/mmc/host/sdhci-xenon.h +++ b/drivers/mmc/host/sdhci-xenon.h @@ -57,7 +57,8 @@ enum xenon_variant { XENON_A3700, XENON_AP806, XENON_AP807, - XENON_CP110 + XENON_CP110, + XENON_AC5 }; struct xenon_priv { -- cgit
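One way to sanity-check the 34-bit figure above: the AC5 DDR window starts at 0x2_0000_0000 (8 GiB), so even the first byte of DRAM already needs 34 address bits, while DMA_BIT_MASK(34) reaches up to 0x3_FFFF_FFFF (16 GiB), leaving room for the 8 GiB offset plus up to another 8 GiB of populated memory. The default 32-bit mask that the generic SDHCI code sets for SDMA would not reach the DDR window at all, which is why the probe path widens the mask explicitly after the quirks have been applied.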