Diffstat (limited to 'drivers/spi/spi-fsl-dspi.c')
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 719
1 file changed, 494 insertions(+), 225 deletions(-)
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index e419642eb10e..83ea296597e9 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2013 Freescale Semiconductor, Inc.
-// Copyright 2020 NXP
+// Copyright 2020-2025 NXP
//
// Freescale DSPI driver
// This file contains a driver for the Freescale DSPI
@@ -13,7 +13,8 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
@@ -22,7 +23,8 @@
#define DRIVER_NAME "fsl-dspi"
#define SPI_MCR 0x00
-#define SPI_MCR_MASTER BIT(31)
+#define SPI_MCR_HOST BIT(31)
+#define SPI_MCR_MTFE BIT(26)
#define SPI_MCR_PCSIS(x) ((x) << 16)
#define SPI_MCR_CLR_TXF BIT(11)
#define SPI_MCR_CLR_RXF BIT(10)
@@ -34,8 +36,9 @@
#define SPI_TCR 0x08
#define SPI_TCR_GET_TCNT(x) (((x) & GENMASK(31, 16)) >> 16)
-#define SPI_CTAR(x) (0x0c + (((x) & GENMASK(1, 0)) * 4))
+#define SPI_CTAR(x) (0x0c + (((x) & GENMASK(2, 0)) * 4))
#define SPI_CTAR_FMSZ(x) (((x) << 27) & GENMASK(30, 27))
+#define SPI_CTAR_DBR BIT(31)
#define SPI_CTAR_CPOL BIT(26)
#define SPI_CTAR_CPHA BIT(25)
#define SPI_CTAR_LSBFE BIT(24)
@@ -61,6 +64,7 @@
#define SPI_SR_TFIWF BIT(18)
#define SPI_SR_RFDF BIT(17)
#define SPI_SR_CMDFFF BIT(16)
+#define SPI_SR_TXRXS BIT(30)
#define SPI_SR_CLEAR (SPI_SR_TCFQF | \
SPI_SR_TFUF | SPI_SR_TFFF | \
SPI_SR_CMDTCF | SPI_SR_SPEF | \
@@ -91,12 +95,14 @@
#define SPI_TXFR1 0x40
#define SPI_TXFR2 0x44
#define SPI_TXFR3 0x48
+#define SPI_TXFR4 0x4C
#define SPI_RXFR0 0x7c
#define SPI_RXFR1 0x80
#define SPI_RXFR2 0x84
#define SPI_RXFR3 0x88
+#define SPI_RXFR4 0x8C
-#define SPI_CTARE(x) (0x11c + (((x) & GENMASK(1, 0)) * 4))
+#define SPI_CTARE(x) (0x11c + (((x) & GENMASK(2, 0)) * 4))
#define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16)
#define SPI_CTARE_DTCP(x) ((x) & 0x7ff)
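[Annotation, not part of the patch] The widened index masks above matter for S32G: with GENMASK(1, 0) the CTAR/CTARE index wrapped at 4, so the extra CTAR slots used later in the S32G access tables (up to SPI_CTAR(5)) would have aliased CTAR0/CTAR1. A throwaway standalone sketch of the arithmetic:

#include <stdio.h>

/* Standalone sketch (not kernel code): reproduce the two macro variants. */
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define CTAR_OLD(x)	(0x0c + (((x) & GENMASK(1, 0)) * 4))
#define CTAR_NEW(x)	(0x0c + (((x) & GENMASK(2, 0)) * 4))

int main(void)
{
	for (int i = 0; i < 6; i++)	/* S32G uses CTAR0..CTAR5 */
		printf("CTAR%d: old 0x%02x, new 0x%02x\n",
		       i, CTAR_OLD(i), CTAR_NEW(i));
	return 0;	/* with the old mask, indices 4 and 5 alias 0x0c and 0x10 */
}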
@@ -107,6 +113,8 @@
#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
+#define SPI_25MHZ 25000000
+
struct chip_data {
u32 ctar_val;
};
@@ -120,6 +128,7 @@ struct fsl_dspi_devtype_data {
enum dspi_trans_mode trans_mode;
u8 max_clock_factor;
int fifo_size;
+ const struct regmap_config *regmap;
};
enum {
@@ -133,6 +142,102 @@ enum {
LX2160A,
MCF5441X,
VF610,
+ S32G,
+ S32G_TARGET,
+};
+
+static const struct regmap_range dspi_yes_ranges[] = {
+ regmap_reg_range(SPI_MCR, SPI_MCR),
+ regmap_reg_range(SPI_TCR, SPI_CTAR(3)),
+ regmap_reg_range(SPI_SR, SPI_TXFR3),
+ regmap_reg_range(SPI_RXFR0, SPI_RXFR3),
+ regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)),
+ regmap_reg_range(SPI_SREX, SPI_SREX),
+};
+
+static const struct regmap_range s32g_dspi_yes_ranges[] = {
+ regmap_reg_range(SPI_MCR, SPI_MCR),
+ regmap_reg_range(SPI_TCR, SPI_CTAR(5)),
+ regmap_reg_range(SPI_SR, SPI_TXFR4),
+ regmap_reg_range(SPI_RXFR0, SPI_RXFR4),
+ regmap_reg_range(SPI_CTARE(0), SPI_CTARE(5)),
+ regmap_reg_range(SPI_SREX, SPI_SREX),
+};
+
+static const struct regmap_access_table dspi_access_table = {
+ .yes_ranges = dspi_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dspi_yes_ranges),
+};
+
+static const struct regmap_access_table s32g_dspi_access_table = {
+ .yes_ranges = s32g_dspi_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(s32g_dspi_yes_ranges),
+};
+
+static const struct regmap_range dspi_volatile_ranges[] = {
+ regmap_reg_range(SPI_MCR, SPI_TCR),
+ regmap_reg_range(SPI_SR, SPI_SR),
+ regmap_reg_range(SPI_PUSHR, SPI_RXFR4),
+ regmap_reg_range(SPI_SREX, SPI_SREX),
+};
+
+static const struct regmap_access_table dspi_volatile_table = {
+ .yes_ranges = dspi_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges),
+};
+
+enum {
+ DSPI_REGMAP,
+ S32G_DSPI_REGMAP,
+ DSPI_XSPI_REGMAP,
+ S32G_DSPI_XSPI_REGMAP,
+ DSPI_PUSHR,
+};
+
+static const struct regmap_config dspi_regmap_config[] = {
+ [DSPI_REGMAP] = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = SPI_RXFR3,
+ .volatile_table = &dspi_volatile_table,
+ .rd_table = &dspi_access_table,
+ .wr_table = &dspi_access_table,
+ },
+ [S32G_DSPI_REGMAP] = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = SPI_RXFR4,
+ .volatile_table = &dspi_volatile_table,
+ .wr_table = &s32g_dspi_access_table,
+ .rd_table = &s32g_dspi_access_table,
+ },
+ [DSPI_XSPI_REGMAP] = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = SPI_SREX,
+ .volatile_table = &dspi_volatile_table,
+ .rd_table = &dspi_access_table,
+ .wr_table = &dspi_access_table,
+ },
+ [S32G_DSPI_XSPI_REGMAP] = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = SPI_SREX,
+ .volatile_table = &dspi_volatile_table,
+ .wr_table = &s32g_dspi_access_table,
+ .rd_table = &s32g_dspi_access_table,
+ },
+ [DSPI_PUSHR] = {
+ .name = "pushr",
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_stride = 2,
+ .max_register = 0x2,
+ },
};
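[Annotation, not part of the patch] The point of the rd/wr access tables above is that regmap itself rejects accesses outside the yes_ranges, so a register that a given SoC does not implement returns an error instead of producing a bus access. A minimal sketch under that assumption (hypothetical helper living in this driver):

/* Sketch only: a read of an S32G-only CTAR slot succeeds with the S32G
 * tables but gets -EIO from regmap when the narrower dspi_access_table
 * is in use, since SPI_CTAR(5) lies outside its yes_ranges.
 */
static int dspi_check_regmap(struct fsl_dspi *dspi)
{
	u32 val;
	int ret;

	ret = regmap_read(dspi->regmap, SPI_CTAR(5), &val);
	if (ret)
		dev_warn(&dspi->pdev->dev,
			 "CTAR5 not readable (%d); not an S32G regmap?\n", ret);
	return ret;
}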
static const struct fsl_dspi_devtype_data devtype_data[] = {
@@ -140,55 +245,77 @@ static const struct fsl_dspi_devtype_data devtype_data[] = {
.trans_mode = DSPI_DMA_MODE,
.max_clock_factor = 2,
.fifo_size = 4,
+ .regmap = &dspi_regmap_config[DSPI_REGMAP],
},
[LS1021A] = {
/* Has A-011218 DMA erratum */
.trans_mode = DSPI_XSPI_MODE,
.max_clock_factor = 8,
.fifo_size = 4,
+ .regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP],
},
[LS1012A] = {
/* Has A-011218 DMA erratum */
.trans_mode = DSPI_XSPI_MODE,
.max_clock_factor = 8,
.fifo_size = 16,
+ .regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP],
},
[LS1028A] = {
.trans_mode = DSPI_XSPI_MODE,
.max_clock_factor = 8,
.fifo_size = 4,
+ .regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP],
},
[LS1043A] = {
/* Has A-011218 DMA erratum */
.trans_mode = DSPI_XSPI_MODE,
.max_clock_factor = 8,
.fifo_size = 16,
+ .regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP],
},
[LS1046A] = {
/* Has A-011218 DMA erratum */
.trans_mode = DSPI_XSPI_MODE,
.max_clock_factor = 8,
.fifo_size = 16,
+ .regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP],
},
[LS2080A] = {
.trans_mode = DSPI_XSPI_MODE,
.max_clock_factor = 8,
.fifo_size = 4,
+ .regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP],
},
[LS2085A] = {
.trans_mode = DSPI_XSPI_MODE,
.max_clock_factor = 8,
.fifo_size = 4,
+ .regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP],
},
[LX2160A] = {
.trans_mode = DSPI_XSPI_MODE,
.max_clock_factor = 8,
.fifo_size = 4,
+ .regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP],
},
[MCF5441X] = {
.trans_mode = DSPI_DMA_MODE,
.max_clock_factor = 8,
.fifo_size = 16,
+ .regmap = &dspi_regmap_config[DSPI_REGMAP],
+ },
+ [S32G] = {
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 1,
+ .fifo_size = 5,
+ .regmap = &dspi_regmap_config[S32G_DSPI_XSPI_REGMAP],
+ },
+ [S32G_TARGET] = {
+ .trans_mode = DSPI_DMA_MODE,
+ .max_clock_factor = 1,
+ .fifo_size = 5,
+ .regmap = &dspi_regmap_config[S32G_DSPI_REGMAP],
},
};
@@ -204,6 +331,8 @@ struct fsl_dspi_dma {
dma_addr_t rx_dma_phys;
struct completion cmd_rx_complete;
struct dma_async_tx_descriptor *rx_desc;
+
+ size_t bufsize;
};
struct fsl_dspi {
@@ -223,6 +352,7 @@ struct fsl_dspi {
const void *tx;
void *rx;
u16 tx_cmd;
+ bool mtf_enabled;
const struct fsl_dspi_devtype_data *devtype_data;
struct completion xfer_done;
@@ -245,6 +375,14 @@ struct fsl_dspi {
void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata);
};
+static void dspi_setup_accel(struct fsl_dspi *dspi);
+
+static bool is_s32g_dspi(struct fsl_dspi *data)
+{
+ return data->devtype_data == &devtype_data[S32G] ||
+ data->devtype_data == &devtype_data[S32G_TARGET];
+}
+
static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
switch (dspi->oper_word_size) {
@@ -279,25 +417,25 @@ static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
static void dspi_8on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
- *txdata = cpu_to_be32(*(u32 *)dspi->tx);
+ *txdata = (__force u32)cpu_to_be32(*(u32 *)dspi->tx);
dspi->tx += sizeof(u32);
}
static void dspi_8on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
- *(u32 *)dspi->rx = be32_to_cpu(rxdata);
+ *(u32 *)dspi->rx = be32_to_cpu((__force __be32)rxdata);
dspi->rx += sizeof(u32);
}
static void dspi_8on16_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
- *txdata = cpu_to_be16(*(u16 *)dspi->tx);
+ *txdata = (__force u32)cpu_to_be16(*(u16 *)dspi->tx);
dspi->tx += sizeof(u16);
}
static void dspi_8on16_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
- *(u16 *)dspi->rx = be16_to_cpu(rxdata);
+ *(u16 *)dspi->rx = be16_to_cpu((__force __be16)rxdata);
dspi->rx += sizeof(u16);
}
@@ -334,12 +472,33 @@ static u32 dspi_pop_tx(struct fsl_dspi *dspi)
return txdata;
}
+/* Push one word to the RX buffer from the POPR register (RX FIFO) */
+static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
+{
+ if (!dspi->rx)
+ return;
+ dspi->dev_to_host(dspi, rxdata);
+}
+
+static int dspi_fifo_error(struct fsl_dspi *dspi, u32 spi_sr)
+{
+ if (spi_sr & (SPI_SR_TFUF | SPI_SR_RFOF)) {
+ dev_err_ratelimited(&dspi->pdev->dev, "FIFO errors:%s%s\n",
+ spi_sr & SPI_SR_TFUF ? " TX underflow," : "",
+ spi_sr & SPI_SR_RFOF ? " RX overflow," : "");
+ return -EIO;
+ }
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_DMA_ENGINE)
+
/* Prepare one TX FIFO entry (txdata plus cmd) */
static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
- if (spi_controller_is_slave(dspi->ctlr))
+ if (spi_controller_is_target(dspi->ctlr))
return data;
if (dspi->len > 0)
@@ -347,19 +506,37 @@ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
return cmd << 16 | data;
}
-/* Push one word to the RX buffer from the POPR register (RX FIFO) */
-static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
+static size_t dspi_dma_max_datawords(struct fsl_dspi *dspi)
{
- if (!dspi->rx)
- return;
- dspi->dev_to_host(dspi, rxdata);
+ /*
+ * Transfers look like one of these, so we always use a full DMA word
+ * regardless of SPI word size:
+ *
+ * 31 16 15 0
+ * -----------------------------------------
+ * | CONTROL WORD | 16-bit DATA |
+ * -----------------------------------------
+ * or
+ * -----------------------------------------
+ * | CONTROL WORD | UNUSED | 8-bit DATA |
+ * -----------------------------------------
+ */
+ return dspi->dma->bufsize / DMA_SLAVE_BUSWIDTH_4_BYTES;
+}
+
+static size_t dspi_dma_transfer_size(struct fsl_dspi *dspi)
+{
+ return dspi->words_in_flight * DMA_SLAVE_BUSWIDTH_4_BYTES;
}
static void dspi_tx_dma_callback(void *arg)
{
struct fsl_dspi *dspi = arg;
struct fsl_dspi_dma *dma = dspi->dma;
+ struct device *dev = &dspi->pdev->dev;
+ dma_sync_single_for_cpu(dev, dma->tx_dma_phys,
+ dspi_dma_transfer_size(dspi), DMA_TO_DEVICE);
complete(&dma->cmd_tx_complete);
}
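[Annotation, not part of the patch] To make the fixed 4-bytes-per-word budget above concrete: in host mode dspi_pop_tx_pushr() always emits one 32-bit PUSHR word, command in the upper half and data in the lower half, whether the SPI word is 8 or 16 bits wide. A rough worked example with made-up command bits:

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch: compose one PUSHR word the way the host-mode DMA
 * path does, and show what a 4 KiB PAGE_SIZE buffer holds.
 */
int main(void)
{
	uint16_t cmd  = 0x8002;		/* e.g. CONT | PCS1, values made up */
	uint16_t data = 0xbeef;		/* one 16-bit SPI data word */
	uint32_t pushr = (uint32_t)cmd << 16 | data;

	printf("PUSHR word: 0x%08x\n", pushr);	/* 0x8002beef */
	printf("words per 4 KiB buffer: %u\n", 4096u / 4u);	/* 1024 */
	return 0;
}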
@@ -367,9 +544,13 @@ static void dspi_rx_dma_callback(void *arg)
{
struct fsl_dspi *dspi = arg;
struct fsl_dspi_dma *dma = dspi->dma;
+ struct device *dev = &dspi->pdev->dev;
int i;
if (dspi->rx) {
+ dma_sync_single_for_cpu(dev, dma->rx_dma_phys,
+ dspi_dma_transfer_size(dspi),
+ DMA_FROM_DEVICE);
for (i = 0; i < dspi->words_in_flight; i++)
dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
}
@@ -379,20 +560,22 @@ static void dspi_rx_dma_callback(void *arg)
static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
+ size_t size = dspi_dma_transfer_size(dspi);
struct device *dev = &dspi->pdev->dev;
struct fsl_dspi_dma *dma = dspi->dma;
int time_left;
+ u32 spi_sr;
int i;
for (i = 0; i < dspi->words_in_flight; i++)
dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);
+ dma_sync_single_for_device(dev, dma->tx_dma_phys, size, DMA_TO_DEVICE);
dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
- dma->tx_dma_phys,
- dspi->words_in_flight *
- DMA_SLAVE_BUSWIDTH_4_BYTES,
- DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ dma->tx_dma_phys, size,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
if (!dma->tx_desc) {
dev_err(dev, "Not able to get desc for DMA xfer\n");
return -EIO;
@@ -405,12 +588,13 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
return -EINVAL;
}
+ dma_sync_single_for_device(dev, dma->rx_dma_phys, size,
+ DMA_FROM_DEVICE);
dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
- dma->rx_dma_phys,
- dspi->words_in_flight *
- DMA_SLAVE_BUSWIDTH_4_BYTES,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ dma->rx_dma_phys, size,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
if (!dma->rx_desc) {
dev_err(dev, "Not able to get desc for DMA xfer\n");
return -EIO;
@@ -429,9 +613,10 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
dma_async_issue_pending(dma->chan_rx);
dma_async_issue_pending(dma->chan_tx);
- if (spi_controller_is_slave(dspi->ctlr)) {
+ if (spi_controller_is_target(dspi->ctlr)) {
wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
- return 0;
+ regmap_read(dspi->regmap, SPI_SR, &spi_sr);
+ return dspi_fifo_error(dspi, spi_sr);
}
time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
@@ -455,13 +640,10 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
return 0;
}
-static void dspi_setup_accel(struct fsl_dspi *dspi);
-
-static int dspi_dma_xfer(struct fsl_dspi *dspi)
+static void dspi_dma_xfer(struct fsl_dspi *dspi)
{
struct spi_message *message = dspi->cur_msg;
struct device *dev = &dspi->pdev->dev;
- int ret = 0;
/*
* dspi->len gets decremented by dspi_pop_tx_pushr in
@@ -471,26 +653,22 @@ static int dspi_dma_xfer(struct fsl_dspi *dspi)
/* Figure out operational bits-per-word for this chunk */
dspi_setup_accel(dspi);
- dspi->words_in_flight = dspi->len / dspi->oper_word_size;
- if (dspi->words_in_flight > dspi->devtype_data->fifo_size)
- dspi->words_in_flight = dspi->devtype_data->fifo_size;
+ dspi->words_in_flight = min(dspi->len / dspi->oper_word_size,
+ dspi_dma_max_datawords(dspi));
message->actual_length += dspi->words_in_flight *
dspi->oper_word_size;
- ret = dspi_next_xfer_dma_submit(dspi);
- if (ret) {
+ message->status = dspi_next_xfer_dma_submit(dspi);
+ if (message->status) {
dev_err(dev, "DMA transfer failed\n");
break;
}
}
-
- return ret;
}
static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
- int dma_bufsize = dspi->devtype_data->fifo_size * 2;
struct device *dev = &dspi->pdev->dev;
struct dma_slave_config cfg;
struct fsl_dspi_dma *dma;
@@ -501,30 +679,39 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
return -ENOMEM;
dma->chan_rx = dma_request_chan(dev, "rx");
- if (IS_ERR(dma->chan_rx)) {
- dev_err(dev, "rx dma channel not available\n");
- ret = PTR_ERR(dma->chan_rx);
- return ret;
- }
+ if (IS_ERR(dma->chan_rx))
+ return dev_err_probe(dev, PTR_ERR(dma->chan_rx), "rx dma channel not available\n");
dma->chan_tx = dma_request_chan(dev, "tx");
if (IS_ERR(dma->chan_tx)) {
- dev_err(dev, "tx dma channel not available\n");
- ret = PTR_ERR(dma->chan_tx);
+ ret = dev_err_probe(dev, PTR_ERR(dma->chan_tx), "tx dma channel not available\n");
goto err_tx_channel;
}
- dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev,
- dma_bufsize, &dma->tx_dma_phys,
- GFP_KERNEL);
+ if (spi_controller_is_target(dspi->ctlr)) {
+ /*
+ * In target mode we have to be ready to receive the maximum
+ * that can possibly be transferred at once by EDMA without any
+ * FIFO underflows.
+ */
+ dma->bufsize = min(dma_get_max_seg_size(dma->chan_rx->device->dev),
+ dma_get_max_seg_size(dma->chan_tx->device->dev)) *
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ } else {
+ dma->bufsize = PAGE_SIZE;
+ }
+
+ dma->tx_dma_buf = dma_alloc_noncoherent(dma->chan_tx->device->dev,
+ dma->bufsize, &dma->tx_dma_phys,
+ DMA_TO_DEVICE, GFP_KERNEL);
if (!dma->tx_dma_buf) {
ret = -ENOMEM;
goto err_tx_dma_buf;
}
- dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev,
- dma_bufsize, &dma->rx_dma_phys,
- GFP_KERNEL);
+ dma->rx_dma_buf = dma_alloc_noncoherent(dma->chan_rx->device->dev,
+ dma->bufsize, &dma->rx_dma_phys,
+ DMA_FROM_DEVICE, GFP_KERNEL);
if (!dma->rx_dma_buf) {
ret = -ENOMEM;
goto err_rx_dma_buf;
@@ -541,16 +728,14 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
cfg.direction = DMA_DEV_TO_MEM;
ret = dmaengine_slave_config(dma->chan_rx, &cfg);
if (ret) {
- dev_err(dev, "can't configure rx dma channel\n");
- ret = -EINVAL;
+ dev_err_probe(dev, ret, "can't configure rx dma channel\n");
goto err_slave_config;
}
cfg.direction = DMA_MEM_TO_DEV;
ret = dmaengine_slave_config(dma->chan_tx, &cfg);
if (ret) {
- dev_err(dev, "can't configure tx dma channel\n");
- ret = -EINVAL;
+ dev_err_probe(dev, ret, "can't configure tx dma channel\n");
goto err_slave_config;
}
@@ -561,11 +746,12 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
return 0;
err_slave_config:
- dma_free_coherent(dma->chan_rx->device->dev,
- dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys);
+ dma_free_noncoherent(dma->chan_rx->device->dev, dma->bufsize,
+ dma->rx_dma_buf, dma->rx_dma_phys,
+ DMA_FROM_DEVICE);
err_rx_dma_buf:
- dma_free_coherent(dma->chan_tx->device->dev,
- dma_bufsize, dma->tx_dma_buf, dma->tx_dma_phys);
+ dma_free_noncoherent(dma->chan_tx->device->dev, dma->bufsize,
+ dma->tx_dma_buf, dma->tx_dma_phys, DMA_TO_DEVICE);
err_tx_dma_buf:
dma_release_channel(dma->chan_tx);
err_tx_channel:
@@ -579,27 +765,40 @@ err_tx_channel:
static void dspi_release_dma(struct fsl_dspi *dspi)
{
- int dma_bufsize = dspi->devtype_data->fifo_size * 2;
struct fsl_dspi_dma *dma = dspi->dma;
if (!dma)
return;
if (dma->chan_tx) {
- dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
- dma->tx_dma_buf, dma->tx_dma_phys);
+ dma_free_noncoherent(dma->chan_tx->device->dev, dma->bufsize,
+ dma->tx_dma_buf, dma->tx_dma_phys,
+ DMA_TO_DEVICE);
dma_release_channel(dma->chan_tx);
}
if (dma->chan_rx) {
- dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
- dma->rx_dma_buf, dma->rx_dma_phys);
+ dma_free_noncoherent(dma->chan_rx->device->dev, dma->bufsize,
+ dma->rx_dma_buf, dma->rx_dma_phys,
+ DMA_FROM_DEVICE);
dma_release_channel(dma->chan_rx);
}
}
+#else
+static void dspi_dma_xfer(struct fsl_dspi *dspi)
+{
+ dspi->cur_msg->status = -EINVAL;
+}
+static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
+{
+ dev_err(&dspi->pdev->dev, "DMA support not enabled in kernel\n");
+ return -EINVAL;
+}
+static void dspi_release_dma(struct fsl_dspi *dspi) {}
+#endif
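[Annotation, not part of the patch] The switch from dma_alloc_coherent() to dma_alloc_noncoherent() above moves cache maintenance into the driver: the buffer has to be handed to the device before each descriptor is issued and reclaimed for the CPU before its contents are read, which is what the new dma_sync_single_for_device()/_for_cpu() calls do. A compressed sketch of that ownership pattern (hypothetical helper, simplified error handling, not the driver's exact code):

/* Sketch: lifetime of a TX bounce buffer with explicit sync points. */
static int demo_noncoherent_tx(struct device *dev, const void *payload,
			       size_t len)
{
	dma_addr_t phys;
	void *buf;

	buf = dma_alloc_noncoherent(dev, len, &phys, DMA_TO_DEVICE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memcpy(buf, payload, len);			/* CPU owns the buffer */
	dma_sync_single_for_device(dev, phys, len, DMA_TO_DEVICE);
	/* ... prep/submit the slave descriptor, wait for the callback ... */
	dma_sync_single_for_cpu(dev, phys, len, DMA_TO_DEVICE);

	dma_free_noncoherent(dev, len, buf, phys, DMA_TO_DEVICE);
	return 0;
}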
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
- unsigned long clkrate)
+ unsigned long clkrate, bool mtf_enabled)
{
/* Valid baud rate pre-scaler values */
int pbr_tbl[4] = {2, 3, 5, 7};
@@ -616,7 +815,13 @@ static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
for (i = 0; i < ARRAY_SIZE(brs); i++)
for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
- scale = brs[i] * pbr_tbl[j];
+ if (mtf_enabled) {
+ /* In MTF mode DBR=1 so frequency is doubled */
+ scale = (brs[i] * pbr_tbl[j]) / 2;
+ } else {
+ scale = brs[i] * pbr_tbl[j];
+ }
+
if (scale >= scale_needed) {
if (scale < minscale) {
minscale = scale;
@@ -750,8 +955,12 @@ static void dspi_setup_accel(struct fsl_dspi *dspi)
struct spi_transfer *xfer = dspi->cur_transfer;
bool odd = !!(dspi->len & 1);
- /* No accel for frames not multiple of 8 bits at the moment */
- if (xfer->bits_per_word % 8)
+ /*
+ * No accel for DMA transfers or frames not multiples of 8 bits at the
+ * moment.
+ */
+ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE ||
+ xfer->bits_per_word % 8)
goto no_accel;
if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) {
@@ -760,10 +969,7 @@ static void dspi_setup_accel(struct fsl_dspi *dspi)
dspi->oper_bits_per_word = 8;
} else {
/* Start off with maximum supported by hardware */
- if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
- dspi->oper_bits_per_word = 32;
- else
- dspi->oper_bits_per_word = 16;
+ dspi->oper_bits_per_word = 32;
/*
* And go down only if the buffer can't be sent with
@@ -851,41 +1057,55 @@ static void dspi_fifo_write(struct fsl_dspi *dspi)
dspi->progress, !dspi->irq);
}
-static int dspi_rxtx(struct fsl_dspi *dspi)
+/*
+ * Read the previous transfer from the FIFO and transmit the next one.
+ *
+ * Returns false if the buffer to be transmitted is empty, and true if there is
+ * still data to transmit.
+ */
+static bool dspi_rxtx(struct fsl_dspi *dspi)
{
dspi_fifo_read(dspi);
if (!dspi->len)
/* Success! */
- return 0;
+ return false;
dspi_fifo_write(dspi);
- return -EINPROGRESS;
+ return true;
}
-static int dspi_poll(struct fsl_dspi *dspi)
+static void dspi_poll(struct fsl_dspi *dspi)
{
- int tries = 1000;
+ int tries;
+ int err = 0;
u32 spi_sr;
do {
- regmap_read(dspi->regmap, SPI_SR, &spi_sr);
- regmap_write(dspi->regmap, SPI_SR, spi_sr);
-
- if (spi_sr & SPI_SR_CMDTCF)
+ for (tries = 1000; tries > 0; --tries) {
+ regmap_read(dspi->regmap, SPI_SR, &spi_sr);
+ regmap_write(dspi->regmap, SPI_SR, spi_sr);
+
+ dspi->cur_msg->status = dspi_fifo_error(dspi, spi_sr);
+ if (dspi->cur_msg->status)
+ return;
+ if (spi_sr & SPI_SR_CMDTCF)
+ break;
+ }
+ if (!tries) {
+ err = -ETIMEDOUT;
break;
- } while (--tries);
-
- if (!tries)
- return -ETIMEDOUT;
+ }
+ } while (dspi_rxtx(dspi));
- return dspi_rxtx(dspi);
+ dspi->cur_msg->status = err;
}
static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
+ int status;
u32 spi_sr;
regmap_read(dspi->regmap, SPI_SR, &spi_sr);
@@ -894,27 +1114,38 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
if (!(spi_sr & SPI_SR_CMDTCF))
return IRQ_NONE;
- if (dspi_rxtx(dspi) == 0)
+ status = dspi_fifo_error(dspi, spi_sr);
+ if (status) {
+ if (dspi->cur_msg)
+ WRITE_ONCE(dspi->cur_msg->status, status);
complete(&dspi->xfer_done);
+ return IRQ_HANDLED;
+ }
+
+ if (dspi_rxtx(dspi) == false) {
+ if (dspi->cur_msg)
+ WRITE_ONCE(dspi->cur_msg->status, 0);
+ complete(&dspi->xfer_done);
+ }
return IRQ_HANDLED;
}
static void dspi_assert_cs(struct spi_device *spi, bool *cs)
{
- if (!spi->cs_gpiod || *cs)
+ if (!spi_get_csgpiod(spi, 0) || *cs)
return;
- gpiod_set_value_cansleep(spi->cs_gpiod, true);
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), true);
*cs = true;
}
static void dspi_deassert_cs(struct spi_device *spi, bool *cs)
{
- if (!spi->cs_gpiod || !*cs)
+ if (!spi_get_csgpiod(spi, 0) || !*cs)
return;
- gpiod_set_value_cansleep(spi->cs_gpiod, false);
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), false);
*cs = false;
}
@@ -925,10 +1156,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
struct spi_device *spi = message->spi;
struct spi_transfer *transfer;
bool cs = false;
- int status = 0;
+ u32 val = 0;
+ bool cs_change = false;
message->actual_length = 0;
+ /* Put DSPI in running mode if halted. */
+ regmap_read(dspi->regmap, SPI_MCR, &val);
+ if (val & SPI_MCR_HALT) {
+ regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0);
+ while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
+ !(val & SPI_SR_TXRXS))
+ ;
+ }
+
list_for_each_entry(transfer, &message->transfers, transfer_list) {
dspi->cur_transfer = transfer;
dspi->cur_msg = message;
@@ -938,8 +1179,8 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
/* Prepare command word for CMD FIFO */
dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0);
- if (!spi->cs_gpiod)
- dspi->tx_cmd |= SPI_PUSHR_CMD_PCS(spi->chip_select);
+ if (!spi_get_csgpiod(spi, 0))
+ dspi->tx_cmd |= SPI_PUSHR_CMD_PCS(spi_get_chipselect(spi, 0));
if (list_is_last(&dspi->cur_transfer->transfer_list,
&dspi->cur_msg->transfers)) {
@@ -958,6 +1199,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
}
+ cs_change = transfer->cs_change;
dspi->tx = transfer->tx_buf;
dspi->rx = transfer->rx_buf;
dspi->len = transfer->len;
@@ -967,24 +1209,32 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
+ regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
+
spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
dspi->progress, !dspi->irq);
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
- status = dspi_dma_xfer(dspi);
+ dspi_dma_xfer(dspi);
} else {
+ /*
+ * Reinitialize the completion before transferring data
+ * to avoid the case where it might remain in the done
+ * state due to a spurious interrupt from a previous
+ * transfer. This could falsely signal that the current
+ * transfer has completed.
+ */
+ if (dspi->irq)
+ reinit_completion(&dspi->xfer_done);
+
dspi_fifo_write(dspi);
- if (dspi->irq) {
+ if (dspi->irq)
wait_for_completion(&dspi->xfer_done);
- reinit_completion(&dspi->xfer_done);
- } else {
- do {
- status = dspi_poll(dspi);
- } while (status == -EINPROGRESS);
- }
+ else
+ dspi_poll(dspi);
}
- if (status)
+ if (READ_ONCE(message->status))
break;
spi_transfer_delay_exec(transfer);
@@ -993,22 +1243,49 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi_deassert_cs(spi, &cs);
}
- message->status = status;
+ dspi->cur_msg = NULL;
+ if (message->status || !cs_change) {
+ /* Put DSPI in stop mode */
+ regmap_update_bits(dspi->regmap, SPI_MCR,
+ SPI_MCR_HALT, SPI_MCR_HALT);
+ while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
+ val & SPI_SR_TXRXS)
+ ;
+ }
+
spi_finalize_current_message(ctlr);
- return status;
+ return message->status;
+}
+
+static int dspi_set_mtf(struct fsl_dspi *dspi)
+{
+ if (spi_controller_is_target(dspi->ctlr))
+ return 0;
+
+ if (dspi->mtf_enabled)
+ regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_MTFE,
+ SPI_MCR_MTFE);
+ else
+ regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_MTFE, 0);
+
+ return 0;
}
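[Annotation, not part of the patch] Once dspi_set_mtf() turns on MTFE (and dspi_setup() later sets DBR), the halved scale in hz_to_spi_baud() lets the divider search hit rates a plain PBR*BR product cannot. A standalone sketch with made-up clock numbers:

#include <stdio.h>

/* Mirror of the divider search in hz_to_spi_baud(), trimmed to small BR
 * values; with MTF the effective divider is PBR * BR / 2 because DBR
 * doubles the rate.
 */
static unsigned int best_rate(unsigned int clkrate, unsigned int hz, int mtf)
{
	static const unsigned int brs[] = { 2, 4, 6, 8, 16, 32, 64, 128 };
	static const unsigned int pbr_tbl[] = { 2, 3, 5, 7 };
	unsigned int need = (clkrate + hz - 1) / hz;
	unsigned int best = ~0u;
	unsigned int i, j, scale;

	for (i = 0; i < sizeof(brs) / sizeof(brs[0]); i++)
		for (j = 0; j < sizeof(pbr_tbl) / sizeof(pbr_tbl[0]); j++) {
			scale = brs[i] * pbr_tbl[j] / (mtf ? 2 : 1);
			if (scale >= need && scale < best)
				best = scale;
		}
	return clkrate / best;
}

int main(void)
{
	/* Hypothetical 200 MHz protocol clock, 40 MHz requested. */
	printf("plain: %u Hz\n", best_rate(200000000, 40000000, 0)); /* 33333333 */
	printf("mtf:   %u Hz\n", best_rate(200000000, 40000000, 1)); /* 40000000 */
	return 0;
}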
static int dspi_setup(struct spi_device *spi)
{
struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
+ u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz);
unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
+ u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4);
u32 cs_sck_delay = 0, sck_cs_delay = 0;
struct fsl_dspi_platform_data *pdata;
unsigned char pasc = 0, asc = 0;
+ struct gpio_desc *gpio_cs;
struct chip_data *chip;
unsigned long clkrate;
bool cs = true;
+ int val;
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
@@ -1021,18 +1298,48 @@ static int dspi_setup(struct spi_device *spi)
pdata = dev_get_platdata(&dspi->pdev->dev);
if (!pdata) {
- of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
- &cs_sck_delay);
-
- of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
- &sck_cs_delay);
+ val = spi_delay_to_ns(&spi->cs_setup, NULL);
+ cs_sck_delay = val >= 0 ? val : 0;
+ if (!cs_sck_delay)
+ of_property_read_u32(spi->dev.of_node,
+ "fsl,spi-cs-sck-delay",
+ &cs_sck_delay);
+
+ val = spi_delay_to_ns(&spi->cs_hold, NULL);
+ sck_cs_delay = val >= 0 ? val : 0;
+ if (!sck_cs_delay)
+ of_property_read_u32(spi->dev.of_node,
+ "fsl,spi-sck-cs-delay",
+ &sck_cs_delay);
} else {
cs_sck_delay = pdata->cs_sck_delay;
sck_cs_delay = pdata->sck_cs_delay;
}
+ /* Since tCSC and tASC apply to continuous transfers too, avoid SCK
+ * glitches of half a cycle by never allowing tCSC + tASC to go below
+ * half a SCK period.
+ */
+ if (cs_sck_delay < quarter_period_ns)
+ cs_sck_delay = quarter_period_ns;
+ if (sck_cs_delay < quarter_period_ns)
+ sck_cs_delay = quarter_period_ns;
+
+ dev_dbg(&spi->dev,
+ "DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n",
+ cs_sck_delay, sck_cs_delay);
+
clkrate = clk_get_rate(dspi->clk);
- hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);
+
+ if (is_s32g_dspi(dspi) && spi->max_speed_hz > SPI_25MHZ)
+ dspi->mtf_enabled = true;
+ else
+ dspi->mtf_enabled = false;
+
+ dspi_set_mtf(dspi);
+
+ hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate,
+ dspi->mtf_enabled);
/* Set PCS to SCK delay scale values */
ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);
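[Annotation, not part of the patch] Quick numeric check of the tCSC/tASC floor introduced above, using a made-up 10 MHz device:

#include <stdio.h>

int main(void)
{
	unsigned int hz = 10000000;	/* hypothetical spi->max_speed_hz */
	unsigned int period_ns = (1000000000u + hz - 1) / hz;	/* 100 ns */
	unsigned int quarter_period_ns = (period_ns + 3) / 4;	/* 25 ns  */
	unsigned int cs_sck_delay = 10, sck_cs_delay = 0;	/* from DT, say */

	if (cs_sck_delay < quarter_period_ns)
		cs_sck_delay = quarter_period_ns;
	if (sck_cs_delay < quarter_period_ns)
		sck_cs_delay = quarter_period_ns;

	/* 25 + 25 = 50 ns, at least half of the 100 ns SCK period */
	printf("tCSC %u ns + tASC %u ns >= %u ns\n",
	       cs_sck_delay, sck_cs_delay, period_ns / 2);
	return 0;
}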
@@ -1046,7 +1353,7 @@ static int dspi_setup(struct spi_device *spi)
if (spi->mode & SPI_CPHA)
chip->ctar_val |= SPI_CTAR_CPHA;
- if (!spi_controller_is_slave(dspi->ctlr)) {
+ if (!spi_controller_is_target(dspi->ctlr)) {
chip->ctar_val |= SPI_CTAR_PCSSCK(pcssck) |
SPI_CTAR_CSSCK(cssck) |
SPI_CTAR_PASC(pasc) |
@@ -1054,11 +1361,17 @@ static int dspi_setup(struct spi_device *spi)
SPI_CTAR_PBR(pbr) |
SPI_CTAR_BR(br);
+ if (dspi->mtf_enabled)
+ chip->ctar_val |= SPI_CTAR_DBR;
+
if (spi->mode & SPI_LSB_FIRST)
chip->ctar_val |= SPI_CTAR_LSBFE;
}
- gpiod_direction_output(spi->cs_gpiod, false);
+ gpio_cs = spi_get_csgpiod(spi, 0);
+ if (gpio_cs)
+ gpiod_direction_output(gpio_cs, false);
+
dspi_deassert_cs(spi, &cs);
spi_set_ctldata(spi, chip);
@@ -1068,10 +1381,10 @@ static int dspi_setup(struct spi_device *spi)
static void dspi_cleanup(struct spi_device *spi)
{
- struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
+ struct chip_data *chip = spi_get_ctldata(spi);
dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
- spi->controller->bus_num, spi->chip_select);
+ spi->controller->bus_num, spi_get_chipselect(spi, 0));
kfree(chip);
}
@@ -1104,11 +1417,49 @@ static const struct of_device_id fsl_dspi_dt_ids[] = {
}, {
.compatible = "fsl,lx2160a-dspi",
.data = &devtype_data[LX2160A],
+ }, {
+ .compatible = "nxp,s32g2-dspi",
+ .data = &devtype_data[S32G],
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
+static int dspi_init(struct fsl_dspi *dspi)
+{
+ unsigned int mcr;
+
+ /* Set idle states for all chip select signals to high */
+ mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->max_native_cs - 1, 0));
+
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
+ mcr |= SPI_MCR_XSPI;
+ if (!spi_controller_is_target(dspi->ctlr))
+ mcr |= SPI_MCR_HOST;
+
+ mcr |= SPI_MCR_HALT;
+
+ regmap_write(dspi->regmap, SPI_MCR, mcr);
+ regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
+
+ switch (dspi->devtype_data->trans_mode) {
+ case DSPI_XSPI_MODE:
+ regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE);
+ break;
+ case DSPI_DMA_MODE:
+ regmap_write(dspi->regmap, SPI_RSER,
+ SPI_RSER_TFFFE | SPI_RSER_TFFFD |
+ SPI_RSER_RFDFE | SPI_RSER_RFDFD);
+ break;
+ default:
+ dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
+ dspi->devtype_data->trans_mode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_PM_SLEEP
static int dspi_suspend(struct device *dev)
{
@@ -1135,6 +1486,15 @@ static int dspi_resume(struct device *dev)
if (ret)
return ret;
spi_controller_resume(dspi->ctlr);
+
+ ret = dspi_init(dspi);
+ if (ret) {
+ dev_err(dev, "failed to initialize dspi during resume\n");
+ return ret;
+ }
+
+ dspi_set_mtf(dspi);
+
if (dspi->irq)
enable_irq(dspi->irq);
@@ -1144,94 +1504,13 @@ static int dspi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
-static const struct regmap_range dspi_volatile_ranges[] = {
- regmap_reg_range(SPI_MCR, SPI_TCR),
- regmap_reg_range(SPI_SR, SPI_SR),
- regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
-};
-
-static const struct regmap_access_table dspi_volatile_table = {
- .yes_ranges = dspi_volatile_ranges,
- .n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges),
-};
-
-static const struct regmap_config dspi_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = 0x88,
- .volatile_table = &dspi_volatile_table,
-};
-
-static const struct regmap_range dspi_xspi_volatile_ranges[] = {
- regmap_reg_range(SPI_MCR, SPI_TCR),
- regmap_reg_range(SPI_SR, SPI_SR),
- regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
- regmap_reg_range(SPI_SREX, SPI_SREX),
-};
-
-static const struct regmap_access_table dspi_xspi_volatile_table = {
- .yes_ranges = dspi_xspi_volatile_ranges,
- .n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges),
-};
-
-static const struct regmap_config dspi_xspi_regmap_config[] = {
- {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = 0x13c,
- .volatile_table = &dspi_xspi_volatile_table,
- },
- {
- .name = "pushr",
- .reg_bits = 16,
- .val_bits = 16,
- .reg_stride = 2,
- .max_register = 0x2,
- },
-};
-
-static int dspi_init(struct fsl_dspi *dspi)
+static int dspi_target_abort(struct spi_controller *host)
{
- unsigned int mcr;
-
- /* Set idle states for all chip select signals to high */
- mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->max_native_cs - 1, 0));
-
- if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
- mcr |= SPI_MCR_XSPI;
- if (!spi_controller_is_slave(dspi->ctlr))
- mcr |= SPI_MCR_MASTER;
-
- regmap_write(dspi->regmap, SPI_MCR, mcr);
- regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
-
- switch (dspi->devtype_data->trans_mode) {
- case DSPI_XSPI_MODE:
- regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE);
- break;
- case DSPI_DMA_MODE:
- regmap_write(dspi->regmap, SPI_RSER,
- SPI_RSER_TFFFE | SPI_RSER_TFFFD |
- SPI_RSER_RFDFE | SPI_RSER_RFDFD);
- break;
- default:
- dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
- dspi->devtype_data->trans_mode);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int dspi_slave_abort(struct spi_master *master)
-{
- struct fsl_dspi *dspi = spi_master_get_devdata(master);
+ struct fsl_dspi *dspi = spi_controller_get_devdata(host);
/*
* Terminate all pending DMA transactions for the SPI working
- * in SLAVE mode.
+ * in TARGET mode.
*/
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
dmaengine_terminate_sync(dspi->dma->chan_rx);
@@ -1249,7 +1528,6 @@ static int dspi_slave_abort(struct spi_master *master)
static int dspi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- const struct regmap_config *regmap_config;
struct fsl_dspi_platform_data *pdata;
struct spi_controller *ctlr;
int ret, cs_num, bus_num = -1;
@@ -1262,7 +1540,10 @@ static int dspi_probe(struct platform_device *pdev)
if (!dspi)
return -ENOMEM;
- ctlr = spi_alloc_master(&pdev->dev, 0);
+ if (of_property_read_bool(np, "spi-slave"))
+ ctlr = spi_alloc_target(&pdev->dev, 0);
+ else
+ ctlr = spi_alloc_host(&pdev->dev, 0);
if (!ctlr)
return -ENOMEM;
@@ -1277,7 +1558,7 @@ static int dspi_probe(struct platform_device *pdev)
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->cleanup = dspi_cleanup;
- ctlr->slave_abort = dspi_slave_abort;
+ ctlr->target_abort = dspi_target_abort;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
ctlr->use_gpio_descriptors = true;
@@ -1301,9 +1582,6 @@ static int dspi_probe(struct platform_device *pdev)
of_property_read_u32(np, "bus-num", &bus_num);
ctlr->bus_num = bus_num;
- if (of_property_read_bool(np, "spi-slave"))
- ctlr->slave = true;
-
dspi->devtype_data = of_device_get_match_data(&pdev->dev);
if (!dspi->devtype_data) {
dev_err(&pdev->dev, "can't get devtype_data\n");
@@ -1321,6 +1599,9 @@ static int dspi_probe(struct platform_device *pdev)
dspi->pushr_tx = 0;
}
+ if (spi_controller_is_target(ctlr) && is_s32g_dspi(dspi))
+ dspi->devtype_data = &devtype_data[S32G_TARGET];
+
if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
else
@@ -1332,11 +1613,8 @@ static int dspi_probe(struct platform_device *pdev)
goto out_ctlr_put;
}
- if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
- regmap_config = &dspi_xspi_regmap_config[0];
- else
- regmap_config = &dspi_regmap_config;
- dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config);
+ dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ dspi->devtype_data->regmap);
if (IS_ERR(dspi->regmap)) {
dev_err(&pdev->dev, "failed to init regmap: %ld\n",
PTR_ERR(dspi->regmap));
@@ -1347,7 +1625,7 @@ static int dspi_probe(struct platform_device *pdev)
if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) {
dspi->regmap_pushr = devm_regmap_init_mmio(
&pdev->dev, base + SPI_PUSHR,
- &dspi_xspi_regmap_config[1]);
+ &dspi_regmap_config[DSPI_PUSHR]);
if (IS_ERR(dspi->regmap_pushr)) {
dev_err(&pdev->dev,
"failed to init pushr regmap: %ld\n",
@@ -1357,19 +1635,16 @@ static int dspi_probe(struct platform_device *pdev)
}
}
- dspi->clk = devm_clk_get(&pdev->dev, "dspi");
+ dspi->clk = devm_clk_get_enabled(&pdev->dev, "dspi");
if (IS_ERR(dspi->clk)) {
ret = PTR_ERR(dspi->clk);
dev_err(&pdev->dev, "unable to get clock\n");
goto out_ctlr_put;
}
- ret = clk_prepare_enable(dspi->clk);
- if (ret)
- goto out_ctlr_put;
ret = dspi_init(dspi);
if (ret)
- goto out_clk_put;
+ goto out_ctlr_put;
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq <= 0) {
@@ -1385,7 +1660,7 @@ static int dspi_probe(struct platform_device *pdev)
IRQF_SHARED, pdev->name, dspi);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
- goto out_clk_put;
+ goto out_ctlr_put;
}
poll_mode:
@@ -1417,15 +1692,13 @@ out_release_dma:
out_free_irq:
if (dspi->irq)
free_irq(dspi->irq, dspi);
-out_clk_put:
- clk_disable_unprepare(dspi->clk);
out_ctlr_put:
spi_controller_put(ctlr);
return ret;
}
-static int dspi_remove(struct platform_device *pdev)
+static void dspi_remove(struct platform_device *pdev)
{
struct fsl_dspi *dspi = platform_get_drvdata(pdev);
@@ -1443,9 +1716,6 @@ static int dspi_remove(struct platform_device *pdev)
dspi_release_dma(dspi);
if (dspi->irq)
free_irq(dspi->irq, dspi);
- clk_disable_unprepare(dspi->clk);
-
- return 0;
}
static void dspi_shutdown(struct platform_device *pdev)
@@ -1456,7 +1726,6 @@ static void dspi_shutdown(struct platform_device *pdev)
static struct platform_driver fsl_dspi_driver = {
.driver.name = DRIVER_NAME,
.driver.of_match_table = fsl_dspi_dt_ids,
- .driver.owner = THIS_MODULE,
.driver.pm = &dspi_pm,
.probe = dspi_probe,
.remove = dspi_remove,