Diffstat (limited to 'drivers/spi/spi-rockchip.c')
-rw-r--r-- | drivers/spi/spi-rockchip.c | 323
1 file changed, 202 insertions, 121 deletions
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 553b6b9d0222..1a6381de6f33 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -104,8 +104,8 @@
 #define CR0_XFM_RO 0x2
 #define CR0_OPM_OFFSET 20
-#define CR0_OPM_MASTER 0x0
-#define CR0_OPM_SLAVE 0x1
+#define CR0_OPM_HOST 0x0
+#define CR0_OPM_TARGET 0x1
 #define CR0_SOI_OFFSET 23
@@ -125,7 +125,7 @@
 #define SR_TF_EMPTY (1 << 2)
 #define SR_RF_EMPTY (1 << 3)
 #define SR_RF_FULL (1 << 4)
-#define SR_SLAVE_TX_BUSY (1 << 5)
+#define SR_TARGET_TX_BUSY (1 << 5)
 /* Bit fields in ISR, IMR, ISR, RISR, 5bit */
 #define INT_MASK 0x1f
@@ -133,7 +133,8 @@
 #define INT_TF_OVERFLOW (1 << 1)
 #define INT_RF_UNDERFLOW (1 << 2)
 #define INT_RF_OVERFLOW (1 << 3)
-#define INT_RF_FULL (1 << 4)
+#define INT_RF_FULL (1 << 4)
+#define INT_CS_INACTIVE (1 << 6)
 /* Bit fields in ICR, 4bit */
 #define ICR_MASK 0x0f
@@ -150,7 +151,7 @@
 #define RXDMA (1 << 0)
 #define TXDMA (1 << 1)
-/* sclk_out: spi master internal logic in rk3x can support 50Mhz */
+/* sclk_out: spi host internal logic in rk3x can support 50Mhz */
 #define MAX_SCLK_OUT 50000000U
 /*
@@ -159,8 +160,7 @@
 */
 #define ROCKCHIP_SPI_MAX_TRANLEN 0xffff
-/* 2 for native cs, 2 for cs-gpio */
-#define ROCKCHIP_SPI_MAX_CS_NUM 4
+#define ROCKCHIP_SPI_MAX_NATIVE_CS_NUM 2
 #define ROCKCHIP_SPI_VER2_TYPE1 0x05EC0002
 #define ROCKCHIP_SPI_VER2_TYPE2 0x00110002
@@ -191,9 +191,11 @@ struct rockchip_spi {
 	u8 n_bytes;
 	u8 rsd;
-	bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
+	bool target_abort;
+	bool cs_inactive; /* spi target transmission stop when cs inactive */
+	bool cs_high_supported; /* native CS supports active-high polarity */
-	bool slave_abort;
+	struct spi_transfer *xfer; /* Store xfer temporarily */
 };
 static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
@@ -201,13 +203,13 @@ static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
 	writel_relaxed((enable ? 1U : 0U), rs->regs + ROCKCHIP_SPI_SSIENR);
 }
-static inline void wait_for_tx_idle(struct rockchip_spi *rs, bool slave_mode)
+static inline void wait_for_tx_idle(struct rockchip_spi *rs, bool target_mode)
 {
 	unsigned long timeout = jiffies + msecs_to_jiffies(5);
 	do {
-		if (slave_mode) {
-			if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_SLAVE_TX_BUSY) &&
+		if (target_mode) {
+			if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_TARGET_TX_BUSY) &&
 			    !((readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY)))
 				return;
 		} else {
@@ -239,30 +241,40 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
 	struct spi_controller *ctlr = spi->controller;
 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
 	bool cs_asserted = spi->mode & SPI_CS_HIGH ? enable : !enable;
+	bool cs_actual;
-	/* Return immediately for no-op */
-	if (cs_asserted == rs->cs_asserted[spi->chip_select])
+	/*
+	 * SPI subsystem tries to avoid no-op calls that would break the PM
+	 * refcount below. It can't however for the first time it is used.
+	 * To detect this case we read it here and bail out early for no-ops.
+	 */
+	if (spi_get_csgpiod(spi, 0))
+		cs_actual = !!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & 1);
+	else
+		cs_actual = !!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) &
+			       BIT(spi_get_chipselect(spi, 0)));
+	if (unlikely(cs_actual == cs_asserted))
 		return;
 	if (cs_asserted) {
 		/* Keep things powered as long as CS is asserted */
 		pm_runtime_get_sync(rs->dev);
-		if (spi->cs_gpiod)
+		if (spi_get_csgpiod(spi, 0))
 			ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
 		else
-			ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
+			ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER,
+					      BIT(spi_get_chipselect(spi, 0)));
 	} else {
-		if (spi->cs_gpiod)
+		if (spi_get_csgpiod(spi, 0))
 			ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
 		else
-			ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
+			ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER,
+					      BIT(spi_get_chipselect(spi, 0)));
 		/* Drop reference from when we first asserted CS */
 		pm_runtime_put(rs->dev);
 	}
-
-	rs->cs_asserted[spi->chip_select] = cs_asserted;
 }
 static void rockchip_spi_handle_err(struct spi_controller *ctlr,
@@ -275,8 +287,9 @@ static void rockchip_spi_handle_err(struct spi_controller *ctlr,
 	 */
 	spi_enable_chip(rs, false);
-	/* make sure all interrupts are masked */
+	/* make sure all interrupts are masked and status cleared */
 	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+	writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
 	if (atomic_read(&rs->state) & TXDMA)
 		dmaengine_terminate_async(ctlr->dma_tx);
@@ -343,6 +356,15 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
 	struct spi_controller *ctlr = dev_id;
 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+	/* When int_cs_inactive comes, spi target abort */
+	if (rs->cs_inactive && readl_relaxed(rs->regs + ROCKCHIP_SPI_IMR) & INT_CS_INACTIVE) {
+		ctlr->target_abort(ctlr);
+		writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+		writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
+
+		return IRQ_HANDLED;
+	}
+
 	if (rs->tx_left)
 		rockchip_spi_pio_writer(rs);
@@ -350,6 +372,7 @@
 	if (!rs->rx_left) {
 		spi_enable_chip(rs, false);
 		writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+		writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
 		spi_finalize_current_transfer(ctlr);
 	}
@@ -357,19 +380,26 @@
 }
 static int rockchip_spi_prepare_irq(struct rockchip_spi *rs,
-				    struct spi_transfer *xfer)
+				    struct spi_controller *ctlr,
+				    struct spi_transfer *xfer)
 {
 	rs->tx = xfer->tx_buf;
 	rs->rx = xfer->rx_buf;
 	rs->tx_left = rs->tx ? xfer->len / rs->n_bytes : 0;
 	rs->rx_left = xfer->len / rs->n_bytes;
-	writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
+	writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
+	spi_enable_chip(rs, true);
 	if (rs->tx_left)
 		rockchip_spi_pio_writer(rs);
+	if (rs->cs_inactive)
+		writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
+	else
+		writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
+
 	/* 1 means the transfer is in progress */
 	return 1;
 }
@@ -380,9 +410,12 @@ static void rockchip_spi_dma_rxcb(void *data)
 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
 	int state = atomic_fetch_andnot(RXDMA, &rs->state);
-	if (state & TXDMA && !rs->slave_abort)
+	if (state & TXDMA && !rs->target_abort)
 		return;
+	if (rs->cs_inactive)
+		writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+
 	spi_enable_chip(rs, false);
 	spi_finalize_current_transfer(ctlr);
 }
@@ -393,11 +426,11 @@ static void rockchip_spi_dma_txcb(void *data)
 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
 	int state = atomic_fetch_andnot(TXDMA, &rs->state);
-	if (state & RXDMA && !rs->slave_abort)
+	if (state & RXDMA && !rs->target_abort)
 		return;
 	/* Wait until the FIFO data completely. */
-	wait_for_tx_idle(rs, ctlr->slave);
+	wait_for_tx_idle(rs, ctlr->target);
 	spi_enable_chip(rs, false);
 	spi_finalize_current_transfer(ctlr);
@@ -423,14 +456,16 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
 	atomic_set(&rs->state, 0);
+	rs->tx = xfer->tx_buf;
+	rs->rx = xfer->rx_buf;
+
 	rxdesc = NULL;
 	if (xfer->rx_buf) {
 		struct dma_slave_config rxconf = {
 			.direction = DMA_DEV_TO_MEM,
 			.src_addr = rs->dma_addr_rx,
 			.src_addr_width = rs->n_bytes,
-			.src_maxburst = rockchip_spi_calc_burst_size(xfer->len /
-								     rs->n_bytes),
+			.src_maxburst = rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes),
 		};
 		dmaengine_slave_config(ctlr->dma_rx, &rxconf);
@@ -474,10 +509,13 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
 	/* rx must be started before tx due to spi instinct */
 	if (rxdesc) {
 		atomic_or(RXDMA, &rs->state);
-		dmaengine_submit(rxdesc);
+		ctlr->dma_rx->cookie = dmaengine_submit(rxdesc);
 		dma_async_issue_pending(ctlr->dma_rx);
 	}
+	if (rs->cs_inactive)
+		writel_relaxed(INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
+
 	spi_enable_chip(rs, true);
 	if (txdesc) {
@@ -492,7 +530,7 @@ static int rockchip_spi_config(struct rockchip_spi *rs,
 		struct spi_device *spi, struct spi_transfer *xfer,
-		bool use_dma, bool slave_mode)
+		bool use_dma, bool target_mode)
 {
 	u32 cr0 = CR0_FRF_SPI << CR0_FRF_OFFSET
 		| CR0_BHT_8BIT << CR0_BHT_OFFSET
@@ -501,16 +539,16 @@
 	u32 cr1;
 	u32 dmacr = 0;
-	if (slave_mode)
-		cr0 |= CR0_OPM_SLAVE << CR0_OPM_OFFSET;
-	rs->slave_abort = false;
+	if (target_mode)
+		cr0 |= CR0_OPM_TARGET << CR0_OPM_OFFSET;
+	rs->target_abort = false;
 	cr0 |= rs->rsd << CR0_RSD_OFFSET;
 	cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
 	if (spi->mode & SPI_LSB_FIRST)
 		cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;
-	if (spi->mode & SPI_CS_HIGH)
-		cr0 |= BIT(spi->chip_select) << CR0_SOI_OFFSET;
+	if ((spi->mode & SPI_CS_HIGH) && !(spi_get_csgpiod(spi, 0)))
+		cr0 |= BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET;
 	if (xfer->rx_buf && xfer->tx_buf)
 		cr0 |= CR0_XFM_TR << CR0_XFM_OFFSET;
@@ -581,11 +619,52 @@ static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
 	return ROCKCHIP_SPI_MAX_TRANLEN;
 }
-static int rockchip_spi_slave_abort(struct spi_controller *ctlr)
+static int rockchip_spi_target_abort(struct spi_controller *ctlr)
 {
 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+	u32 rx_fifo_left;
+	struct dma_tx_state state;
+	enum dma_status status;
+
+	/* Get current dma rx point */
+	if (atomic_read(&rs->state) & RXDMA) {
+		dmaengine_pause(ctlr->dma_rx);
+		status = dmaengine_tx_status(ctlr->dma_rx, ctlr->dma_rx->cookie, &state);
+		if (status == DMA_ERROR) {
+			rs->rx = rs->xfer->rx_buf;
+			rs->xfer->len = 0;
+			rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
+			for (; rx_fifo_left; rx_fifo_left--)
+				readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
+			goto out;
+		} else {
+			rs->rx += rs->xfer->len - rs->n_bytes * state.residue;
+		}
+	}
-	rs->slave_abort = true;
+	/* Get the valid data left in rx fifo and set rs->xfer->len real rx size */
+	if (rs->rx) {
+		rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
+		for (; rx_fifo_left; rx_fifo_left--) {
+			u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
+
+			if (rs->n_bytes == 1)
+				*(u8 *)rs->rx = (u8)rxw;
+			else
+				*(u16 *)rs->rx = (u16)rxw;
+			rs->rx += rs->n_bytes;
+		}
+		rs->xfer->len = (unsigned int)(rs->rx - rs->xfer->rx_buf);
+	}
+
+out:
+	if (atomic_read(&rs->state) & RXDMA)
+		dmaengine_terminate_sync(ctlr->dma_rx);
+	if (atomic_read(&rs->state) & TXDMA)
+		dmaengine_terminate_sync(ctlr->dma_tx);
+	atomic_set(&rs->state, 0);
+	spi_enable_chip(rs, false);
+	rs->target_abort = true;
 	spi_finalize_current_transfer(ctlr);
 	return 0;
@@ -620,17 +699,17 @@ static int rockchip_spi_transfer_one(
 	}
 	rs->n_bytes = xfer->bits_per_word <= 8 ? 1 : 2;
-
+	rs->xfer = xfer;
 	use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
-	ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
+	ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->target);
 	if (ret)
 		return ret;
 	if (use_dma)
 		return rockchip_spi_prepare_dma(rs, ctlr, xfer);
-	return rockchip_spi_prepare_irq(rs, xfer);
+	return rockchip_spi_prepare_irq(rs, ctlr, xfer);
 }
 static bool rockchip_spi_can_dma(struct spi_controller *ctlr,
@@ -647,24 +726,50 @@ static bool rockchip_spi_can_dma(struct spi_controller *ctlr,
 	return xfer->len / bytes_per_word >= rs->fifo_len;
 }
+static int rockchip_spi_setup(struct spi_device *spi)
+{
+	struct rockchip_spi *rs = spi_controller_get_devdata(spi->controller);
+	u32 cr0;
+
+	if (!spi_get_csgpiod(spi, 0) && (spi->mode & SPI_CS_HIGH) && !rs->cs_high_supported) {
+		dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
+		return -EINVAL;
+	}
+
+	pm_runtime_get_sync(rs->dev);
+
+	cr0 = readl_relaxed(rs->regs + ROCKCHIP_SPI_CTRLR0);
+
+	cr0 &= ~(0x3 << CR0_SCPH_OFFSET);
+	cr0 |= ((spi->mode & 0x3) << CR0_SCPH_OFFSET);
+	if (spi->mode & SPI_CS_HIGH && spi_get_chipselect(spi, 0) <= 1)
+		cr0 |= BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET;
+	else if (spi_get_chipselect(spi, 0) <= 1)
+		cr0 &= ~(BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET);
+
+	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
+
+	pm_runtime_put(rs->dev);
+
+	return 0;
+}
+
 static int rockchip_spi_probe(struct platform_device *pdev)
 {
-	int ret;
-	struct rockchip_spi *rs;
+	struct device_node *np = pdev->dev.of_node;
 	struct spi_controller *ctlr;
+	struct rockchip_spi *rs;
 	struct resource *mem;
-	struct device_node *np = pdev->dev.of_node;
-	u32 rsd_nsecs;
-	bool slave_mode;
+	u32 rsd_nsecs, num_cs;
+	bool target_mode;
+	int ret;
-	slave_mode = of_property_read_bool(np, "spi-slave");
+	target_mode = of_property_read_bool(np, "spi-slave");
-	if (slave_mode)
-		ctlr = spi_alloc_slave(&pdev->dev,
-				sizeof(struct rockchip_spi));
+	if (target_mode)
+		ctlr = spi_alloc_target(&pdev->dev, sizeof(struct rockchip_spi));
 	else
-		ctlr = spi_alloc_master(&pdev->dev,
-				sizeof(struct rockchip_spi));
+		ctlr = spi_alloc_host(&pdev->dev, sizeof(struct rockchip_spi));
 	if (!ctlr)
 		return -ENOMEM;
@@ -672,52 +777,38 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, ctlr);
 	rs = spi_controller_get_devdata(ctlr);
-	ctlr->slave = slave_mode;
 	/* Get basic io resource and map it */
-	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	rs->regs = devm_ioremap_resource(&pdev->dev, mem);
+	rs->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
 	if (IS_ERR(rs->regs)) {
-		ret = PTR_ERR(rs->regs);
+		ret = PTR_ERR(rs->regs);
 		goto err_put_ctlr;
 	}
-	rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
+	rs->apb_pclk = devm_clk_get_enabled(&pdev->dev, "apb_pclk");
 	if (IS_ERR(rs->apb_pclk)) {
-		dev_err(&pdev->dev, "Failed to get apb_pclk\n");
-		ret = PTR_ERR(rs->apb_pclk);
+		ret = dev_err_probe(&pdev->dev, PTR_ERR(rs->apb_pclk),
+				    "Failed to get apb_pclk\n");
 		goto err_put_ctlr;
 	}
-	rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
+	rs->spiclk = devm_clk_get_enabled(&pdev->dev, "spiclk");
 	if (IS_ERR(rs->spiclk)) {
-		dev_err(&pdev->dev, "Failed to get spi_pclk\n");
-		ret = PTR_ERR(rs->spiclk);
-		goto err_put_ctlr;
-	}
-
-	ret = clk_prepare_enable(rs->apb_pclk);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
+		ret = dev_err_probe(&pdev->dev, PTR_ERR(rs->spiclk),
				    "Failed to get spi_pclk\n");
 		goto err_put_ctlr;
 	}
-	ret = clk_prepare_enable(rs->spiclk);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "Failed to enable spi_clk\n");
-		goto err_disable_apbclk;
-	}
-
 	spi_enable_chip(rs, false);
 	ret = platform_get_irq(pdev, 0);
 	if (ret < 0)
-		goto err_disable_spiclk;
+		goto err_put_ctlr;
 	ret = devm_request_threaded_irq(&pdev->dev, ret, rockchip_spi_isr, NULL,
-			IRQF_ONESHOT, dev_name(&pdev->dev), ctlr);
+					IRQF_ONESHOT, dev_name(&pdev->dev), ctlr);
 	if (ret)
-		goto err_disable_spiclk;
+		goto err_put_ctlr;
 	rs->dev = &pdev->dev;
 	rs->freq = clk_get_rate(rs->spiclk);
@@ -725,25 +816,23 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 	if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns", &rsd_nsecs)) {
 		/* rx sample delay is expressed in parent clock cycles (max 3) */
-		u32 rsd = DIV_ROUND_CLOSEST(rsd_nsecs * (rs->freq >> 8),
-				1000000000 >> 8);
+		u32 rsd = DIV_ROUND_CLOSEST(rsd_nsecs * (rs->freq >> 8), 1000000000 >> 8);
 		if (!rsd) {
 			dev_warn(rs->dev, "%u Hz are too slow to express %u ns delay\n",
-					rs->freq, rsd_nsecs);
+				 rs->freq, rsd_nsecs);
 		} else if (rsd > CR0_RSD_MAX) {
 			rsd = CR0_RSD_MAX;
-			dev_warn(rs->dev, "%u Hz are too fast to express %u ns delay, clamping at %u ns\n",
-					rs->freq, rsd_nsecs,
-					CR0_RSD_MAX * 1000000000U / rs->freq);
+			dev_warn(rs->dev,
+				 "%u Hz are too fast to express %u ns delay, clamping at %u ns\n",
+				 rs->freq, rsd_nsecs, CR0_RSD_MAX * 1000000000U / rs->freq);
 		}
 		rs->rsd = rsd;
 	}
 	rs->fifo_len = get_fifo_len(rs);
 	if (!rs->fifo_len) {
-		dev_err(&pdev->dev, "Failed to get fifo length\n");
-		ret = -EINVAL;
-		goto err_disable_spiclk;
+		ret = dev_err_probe(&pdev->dev, -EINVAL, "Failed to get fifo length\n");
+		goto err_put_ctlr;
 	}
 	pm_runtime_set_autosuspend_delay(&pdev->dev, ROCKCHIP_AUTOSUSPEND_TIMEOUT);
@@ -754,18 +843,19 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 	ctlr->auto_runtime_pm = true;
 	ctlr->bus_num = pdev->id;
 	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_LSB_FIRST;
-	if (slave_mode) {
+	if (target_mode) {
 		ctlr->mode_bits |= SPI_NO_CS;
-		ctlr->slave_abort = rockchip_spi_slave_abort;
+		ctlr->target_abort = rockchip_spi_target_abort;
 	} else {
-		ctlr->flags = SPI_MASTER_GPIO_SS;
-		ctlr->max_native_cs = ROCKCHIP_SPI_MAX_CS_NUM;
+		ctlr->flags = SPI_CONTROLLER_GPIO_SS;
+		ctlr->max_native_cs = ROCKCHIP_SPI_MAX_NATIVE_CS_NUM;
 		/*
 		 * rk spi0 has two native cs, spi1..5 one cs only
 		 * if num-cs is missing in the dts, default to 1
 		 */
-		if (of_property_read_u16(np, "num-cs", &ctlr->num_chipselect))
-			ctlr->num_chipselect = 1;
+		if (of_property_read_u32(np, "num-cs", &num_cs))
+			num_cs = 1;
+		ctlr->num_chipselect = num_cs;
 		ctlr->use_gpio_descriptors = true;
 	}
 	ctlr->dev.of_node = pdev->dev.of_node;
@@ -773,6 +863,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 	ctlr->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX;
 	ctlr->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT);
+	ctlr->setup = rockchip_spi_setup;
 	ctlr->set_cs = rockchip_spi_set_cs;
 	ctlr->transfer_one = rockchip_spi_transfer_one;
 	ctlr->max_transfer_size = rockchip_spi_max_transfer_size;
@@ -780,22 +871,21 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 	ctlr->dma_tx = dma_request_chan(rs->dev, "tx");
 	if (IS_ERR(ctlr->dma_tx)) {
-		/* Check tx to see if we need defer probing driver */
-		if (PTR_ERR(ctlr->dma_tx) == -EPROBE_DEFER) {
-			ret = -EPROBE_DEFER;
+		/* Check tx to see if we need to defer driver probing */
+		ret = dev_warn_probe(rs->dev, PTR_ERR(ctlr->dma_tx),
+				     "Failed to request optional TX DMA channel\n");
 		if (ret == -EPROBE_DEFER)
 			goto err_disable_pm_runtime;
-		}
-		dev_warn(rs->dev, "Failed to request TX DMA channel\n");
 		ctlr->dma_tx = NULL;
 	}
 	ctlr->dma_rx = dma_request_chan(rs->dev, "rx");
 	if (IS_ERR(ctlr->dma_rx)) {
-		if (PTR_ERR(ctlr->dma_rx) == -EPROBE_DEFER) {
-			ret = -EPROBE_DEFER;
+		/* Check rx to see if we need to defer driver probing */
+		ret = dev_warn_probe(rs->dev, PTR_ERR(ctlr->dma_rx),
+				     "Failed to request optional RX DMA channel\n");
 		if (ret == -EPROBE_DEFER)
 			goto err_free_dma_tx;
-		}
-		dev_warn(rs->dev, "Failed to request RX DMA channel\n");
 		ctlr->dma_rx = NULL;
 	}
@@ -807,9 +897,15 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 	switch (readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION)) {
 	case ROCKCHIP_SPI_VER2_TYPE2:
+		rs->cs_high_supported = true;
 		ctlr->mode_bits |= SPI_CS_HIGH;
+		if (ctlr->can_dma && target_mode)
+			rs->cs_inactive = true;
+		else
+			rs->cs_inactive = false;
 		break;
 	default:
+		rs->cs_inactive = false;
 		break;
 	}
@@ -829,26 +925,18 @@
 err_free_dma_tx:
 	dma_release_channel(ctlr->dma_tx);
 err_disable_pm_runtime:
 	pm_runtime_disable(&pdev->dev);
-err_disable_spiclk:
-	clk_disable_unprepare(rs->spiclk);
-err_disable_apbclk:
-	clk_disable_unprepare(rs->apb_pclk);
 err_put_ctlr:
 	spi_controller_put(ctlr);
 	return ret;
 }
-static int rockchip_spi_remove(struct platform_device *pdev)
+static void rockchip_spi_remove(struct platform_device *pdev)
 {
 	struct spi_controller *ctlr = spi_controller_get(platform_get_drvdata(pdev));
-	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
 	pm_runtime_get_sync(&pdev->dev);
-	clk_disable_unprepare(rs->spiclk);
-	clk_disable_unprepare(rs->apb_pclk);
-
 	pm_runtime_put_noidle(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	pm_runtime_set_suspended(&pdev->dev);
@@ -859,8 +947,6 @@ static int rockchip_spi_remove(struct platform_device *pdev)
 		dma_release_channel(ctlr->dma_rx);
 	spi_controller_put(ctlr);
-
-	return 0;
 }
 #ifdef CONFIG_PM_SLEEP
@@ -874,8 +960,10 @@ static int rockchip_spi_suspend(struct device *dev)
 		return ret;
 	ret = pm_runtime_force_suspend(dev);
-	if (ret < 0)
+	if (ret < 0) {
+		spi_controller_resume(ctlr);
 		return ret;
+	}
 	pinctrl_pm_select_sleep_state(dev);
@@ -886,7 +974,6 @@
 static int rockchip_spi_resume(struct device *dev)
 {
 	int ret;
 	struct spi_controller *ctlr = dev_get_drvdata(dev);
-	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
 	pinctrl_pm_select_default_state(dev);
@@ -894,13 +981,7 @@ static int rockchip_spi_resume(struct device *dev)
 	if (ret < 0)
 		return ret;
-	ret = spi_controller_resume(ctlr);
-	if (ret < 0) {
-		clk_disable_unprepare(rs->spiclk);
-		clk_disable_unprepare(rs->apb_pclk);
-	}
-
-	return 0;
+	return spi_controller_resume(ctlr);
 }
 #endif /* CONFIG_PM_SLEEP */
@@ -935,7 +1016,7 @@ static int rockchip_spi_runtime_resume(struct device *dev)
 #endif /* CONFIG_PM */
 static const struct dev_pm_ops rockchip_spi_pm = {
-	SET_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
 	SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend, rockchip_spi_runtime_resume, NULL)
 };
