author    Linus Torvalds <torvalds@linux-foundation.org>  2023-02-22 10:53:37 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-02-22 10:53:37 -0800
commit    13e574b4941ee1931f8c70f33c3011f74e5fbd30 (patch)
tree      786e3febef35e2db314f96547d04d261733c4f05 /drivers/spi
parent    0175ec3a28c695562a08fdccf73f2ec5ed744e2f (diff)
parent    de82c25dab9ac0fa01c95b8914bde8d9ce528e93 (diff)
Merge tag 'spi-v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi
Pull spi updates from Mark Brown:
 "This has been a fairly quiet release for SPI, though it is likely
  that the next release will have some big changes as there's some
  preparatory work for multiple chip select support gone in - the rest
  of the code is on the list but will need to be rebased onto -rc1.

  Otherwise there's a couple of new tunables for chip select timings,
  some new devices and smaller device specific updates and fixes.

   - Support for configuring the hold and minimum inactive times for
     chip selects.

   - Beginnings of support for supporting devices which have multiple
     chip selects on a single device.

   - Support for newer Broadcom HSSPI and Intel controllers, Silicon
     Labs EM3581 and SI3210"

* tag 'spi-v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi: (67 commits)
  spi: dt-bindings: qcom,spi-qcom-qspi: document OPP and power-domains
  spi: spidev: drop the incorrect notice from Kconfig
  spi: bcm63xx-hsspi: fix error code in probe
  spi: bcmbca-hsspi: Fix error code in probe() function
  spi: synquacer: Fix timeout handling in synquacer_spi_transfer_one()
  spi: intel: Check number of chip selects after reading the descriptor
  spi: xilinx: add force_irq for QSPI mode
  spi: spi-st-ssc: convert to DT schema
  spi: Reorder fields in 'struct spi_transfer'
  spi: cadence-quadspi: use STIG mode for small reads
  spi: cadence-quadspi: setup ADDR Bits in cmd reads
  spi: cadence-quadspi: Add flag for direct mode writes
  spi: cadence-quadspi: Reset CMD_CTRL Reg on cmd r/w completion
  MAINTAINERS: Remove file reference for Broadcom Broadband SoC HS SPI driver entry
  spi: bcm63xx-hsspi: bcmbca-hsspi: fix _be16 type usage
  MAINTAINERS: Add entry for Broadcom Broadband SoC HS SPI drivers
  spi: bcmbca-hsspi: Add driver for newer HSSPI controller
  spi: bcm63xx-hsspi: Disable spi mem dual io read op support
  spi: spi-mem: Allow controller supporting mem_ops without exec_op
  spi: bcm63xx-hsspi: Add prepend mode support
  ...
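As a rough illustration of the chip select timing tunables mentioned above (a hedged sketch, not code from this merge: the struct spi_delay fields and unit constants are taken from include/linux/spi/spi.h, the "demo" driver name is invented, and whether the delays take effect depends on the controller honouring them), a client driver could request longer CS hold and inactive times like this:

/*
 * Hedged sketch: request chip-select hold/inactive delays through the
 * generic struct spi_delay fields of struct spi_device.
 */
#include <linux/module.h>
#include <linux/spi/spi.h>

static int demo_cs_timing_probe(struct spi_device *spi)
{
	/* Keep CS asserted for 5 us after the last clock edge... */
	spi->cs_hold.value = 5;
	spi->cs_hold.unit = SPI_DELAY_UNIT_USECS;

	/* ...and deasserted for at least 2 us between transactions. */
	spi->cs_inactive.value = 2;
	spi->cs_inactive.unit = SPI_DELAY_UNIT_USECS;

	/*
	 * spi_setup() revalidates the device settings; the core consults
	 * the cs_* delays when it toggles the chip select.
	 */
	return spi_setup(spi);
}

static struct spi_driver demo_cs_timing_driver = {
	.driver = { .name = "demo-cs-timing" },	/* hypothetical device name */
	.probe  = demo_cs_timing_probe,
};
module_spi_driver(demo_cs_timing_driver);
MODULE_LICENSE("GPL");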
Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/Kconfig               |  38
-rw-r--r--  drivers/spi/Makefile              |   1
-rw-r--r--  drivers/spi/atmel-quadspi.c       |   8
-rw-r--r--  drivers/spi/spi-altera-core.c     |  30
-rw-r--r--  drivers/spi/spi-altera-dfl.c      |  36
-rw-r--r--  drivers/spi/spi-altera-platform.c |  36
-rw-r--r--  drivers/spi/spi-ar934x.c          |  10
-rw-r--r--  drivers/spi/spi-armada-3700.c     |  98
-rw-r--r--  drivers/spi/spi-at91-usart.c      |  40
-rw-r--r--  drivers/spi/spi-ath79.c           |  40
-rw-r--r--  drivers/spi/spi-atmel.c           | 254
-rw-r--r--  drivers/spi/spi-bcm63xx-hsspi.c   | 493
-rw-r--r--  drivers/spi/spi-bcmbca-hsspi.c    | 654
-rw-r--r--  drivers/spi/spi-cadence-quadspi.c |  42
-rw-r--r--  drivers/spi/spi-geni-qcom.c       | 211
-rw-r--r--  drivers/spi/spi-intel-pci.c       |  13
-rw-r--r--  drivers/spi/spi-intel.c           |  10
-rw-r--r--  drivers/spi/spi-loopback-test.c   |   8
-rw-r--r--  drivers/spi/spi-mem.c             |   2
-rw-r--r--  drivers/spi/spi-mtk-snfi.c        |  41
-rw-r--r--  drivers/spi/spi-pl022.c           |   1
-rw-r--r--  drivers/spi/spi-synquacer.c       |   7
-rw-r--r--  drivers/spi/spi-xilinx.c          |   9
-rw-r--r--  drivers/spi/spi.c                 |  69
-rw-r--r--  drivers/spi/spidev.c              |  13
25 files changed, 1728 insertions, 436 deletions
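Most of the churn in the per-driver hunks below is mechanical: the old spi_master naming is being retired in favour of the controller/host terminology (devm_spi_alloc_host(), spi_controller_get_devdata(), devm_spi_register_controller(), and so on). A minimal probe sketch of the target style follows, using only helpers that appear in the diff itself; the demo_spi driver name, its compatible string and its single MMIO resource are invented for illustration:

/*
 * Minimal sketch of the host/controller naming the hunks below move
 * drivers towards.  Everything "demo_*" is hypothetical.
 */
#include <linux/err.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct demo_spi {
	void __iomem *base;		/* hypothetical register window */
};

/* Placeholder: a real driver would shift the data out/in here. */
static int demo_spi_transfer_one(struct spi_controller *host,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	return 0;	/* 0 = transfer finished (nothing to do in this stub) */
}

static int demo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct demo_spi *hw;
	int ret;

	/* was: devm_spi_alloc_master() */
	host = devm_spi_alloc_host(&pdev->dev, sizeof(*hw));
	if (!host)
		return -ENOMEM;

	/* was: spi_master_get_devdata() */
	hw = spi_controller_get_devdata(host);
	hw->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hw->base))
		return PTR_ERR(hw->base);

	host->dev.of_node = pdev->dev.of_node;
	host->num_chipselect = 1;
	host->transfer_one = demo_spi_transfer_one;

	/* was: devm_spi_register_master() */
	ret = devm_spi_register_controller(&pdev->dev, host);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "failed to register spi host\n");
	return 0;
}

static const struct of_device_id demo_spi_of_match[] = {
	{ .compatible = "vendor,demo-spi" },	/* hypothetical binding */
	{ }
};
MODULE_DEVICE_TABLE(of, demo_spi_of_match);

static struct platform_driver demo_spi_driver = {
	.driver = {
		.name = "demo-spi",
		.of_match_table = demo_spi_of_match,
	},
	.probe = demo_spi_probe,
};
module_platform_driver(demo_spi_driver);
MODULE_LICENSE("GPL");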
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 2aba88a57a77..47bbba04fe3a 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -199,6 +199,15 @@ config SPI_BCM_QSPI
based platforms. This driver works for both SPI master for SPI NOR
flash device as well as MSPI device.
+config SPI_BCMBCA_HSSPI
+ tristate "Broadcom BCMBCA HS SPI controller driver"
+ depends on ARCH_BCMBCA || COMPILE_TEST
+ help
+ This enables support for the High Speed SPI controller present on
+ newer Broadcom BCMBCA SoCs. These SoCs include an updated SPI controller
+ that adds the capability to allow the driver to control chip select
+ explicitly.
+
config SPI_BITBANG
tristate "Utilities for Bitbanging SPI masters"
help
@@ -247,7 +256,7 @@ config SPI_CADENCE_XSPI
Enable support for the Cadence XSPI Flash controller.
Cadence XSPI is a specialized controller for connecting an SPI
- Flash over upto 8bit wide bus. Enable this option if you have a
+ Flash over up to 8-bit wide bus. Enable this option if you have a
device with a Cadence XSPI controller and want to access the
Flash as an MTD device.
@@ -295,7 +304,6 @@ config SPI_DW_BT1
tristate "Baikal-T1 SPI driver for DW SPI core"
depends on MIPS_BAIKAL_T1 || COMPILE_TEST
select MULTIPLEXER
- select MUX_MMIO
help
Baikal-T1 SoC is equipped with three DW APB SSI-based MMIO SPI
controllers. Two of them are pretty much normal: with IRQ, DMA,
@@ -448,19 +456,19 @@ config SPI_INTEL
tristate
config SPI_INTEL_PCI
- tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
+ tristate "Intel PCH/PCU SPI flash PCI driver"
depends on PCI
depends on X86 || COMPILE_TEST
depends on SPI_MEM
select SPI_INTEL
help
This enables PCI support for the Intel PCH/PCU SPI controller in
- master mode. This controller is present in modern Intel hardware
- and is used to hold BIOS and other persistent settings. Using
- this driver it is possible to upgrade BIOS directly from Linux.
-
- Say N here unless you know what you are doing. Overwriting the
- SPI flash may render the system unbootable.
+ master mode. This controller is used to hold BIOS and other
+ persistent settings. Controllers present in modern Intel hardware
+ only work in hardware sequencing mode, this means that the
+ controller exposes a subset of operations that makes it safer to
+ use. Using this driver it is possible to upgrade BIOS directly
+ from Linux.
To compile this driver as a module, choose M here: the module
will be called spi-intel-pci.
@@ -472,10 +480,11 @@ config SPI_INTEL_PLATFORM
select SPI_INTEL
help
This enables platform support for the Intel PCH/PCU SPI
- controller in master mode. This controller is present in modern
- Intel hardware and is used to hold BIOS and other persistent
- settings. Using this driver it is possible to upgrade BIOS
- directly from Linux.
+ controller in master mode that is used to hold BIOS and other
+ persistent settings. Most of these controllers work in
+ software sequencing mode, which means that the controller
+ exposes the low level SPI-NOR opcodes to the software. Using
+ this driver it is possible to upgrade BIOS directly from Linux.
Say N here unless you know what you are doing. Overwriting the
SPI flash may render the system unbootable.
@@ -1142,9 +1151,6 @@ config SPI_SPIDEV
help
This supports user mode SPI protocol drivers.
- Note that this application programming interface is EXPERIMENTAL
- and hence SUBJECT TO CHANGE WITHOUT NOTICE while it stabilizes.
-
config SPI_LOOPBACK_TEST
tristate "spi loopback test framework support"
depends on m
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 12648f75a919..d87cf75bee6a 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
obj-$(CONFIG_SPI_BCM2835AUX) += spi-bcm2835aux.o
obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o
+obj-$(CONFIG_SPI_BCMBCA_HSSPI) += spi-bcmbca-hsspi.o
obj-$(CONFIG_SPI_BCM_QSPI) += spi-iproc-qspi.o spi-brcmstb-qspi.o spi-bcm-qspi.o
obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 70637e46290a..f4632cb07495 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -406,7 +406,7 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
- struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master);
+ struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
u32 sr, offset;
int err;
@@ -476,7 +476,7 @@ static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
static int atmel_qspi_setup(struct spi_device *spi)
{
- struct spi_controller *ctrl = spi->master;
+ struct spi_controller *ctrl = spi->controller;
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
unsigned long src_rate;
u32 scbr;
@@ -512,7 +512,7 @@ static int atmel_qspi_setup(struct spi_device *spi)
static int atmel_qspi_set_cs_timing(struct spi_device *spi)
{
- struct spi_controller *ctrl = spi->master;
+ struct spi_controller *ctrl = spi->controller;
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
unsigned long clk_rate;
u32 cs_setup;
@@ -582,7 +582,7 @@ static int atmel_qspi_probe(struct platform_device *pdev)
struct resource *res;
int irq, err = 0;
- ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(*aq));
+ ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*aq));
if (!ctrl)
return -ENOMEM;
diff --git a/drivers/spi/spi-altera-core.c b/drivers/spi/spi-altera-core.c
index de4d31c530d9..94fe6bf1b9a6 100644
--- a/drivers/spi/spi-altera-core.c
+++ b/drivers/spi/spi-altera-core.c
@@ -24,7 +24,7 @@
#define ALTERA_SPI_TXDATA 4
#define ALTERA_SPI_STATUS 8
#define ALTERA_SPI_CONTROL 12
-#define ALTERA_SPI_SLAVE_SEL 20
+#define ALTERA_SPI_TARGET_SEL 20
#define ALTERA_SPI_STATUS_ROE_MSK 0x8
#define ALTERA_SPI_STATUS_TOE_MSK 0x10
@@ -67,7 +67,7 @@ static int altr_spi_readl(struct altera_spi *hw, unsigned int reg,
static inline struct altera_spi *altera_spi_to_hw(struct spi_device *sdev)
{
- return spi_master_get_devdata(sdev->master);
+ return spi_controller_get_devdata(sdev->controller);
}
static void altera_spi_set_cs(struct spi_device *spi, bool is_high)
@@ -77,9 +77,9 @@ static void altera_spi_set_cs(struct spi_device *spi, bool is_high)
if (is_high) {
hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK;
altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
- altr_spi_writel(hw, ALTERA_SPI_SLAVE_SEL, 0);
+ altr_spi_writel(hw, ALTERA_SPI_TARGET_SEL, 0);
} else {
- altr_spi_writel(hw, ALTERA_SPI_SLAVE_SEL,
+ altr_spi_writel(hw, ALTERA_SPI_TARGET_SEL,
BIT(spi->chip_select));
hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK;
altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
@@ -139,10 +139,10 @@ static void altera_spi_rx_word(struct altera_spi *hw)
hw->count++;
}
-static int altera_spi_txrx(struct spi_master *master,
+static int altera_spi_txrx(struct spi_controller *host,
struct spi_device *spi, struct spi_transfer *t)
{
- struct altera_spi *hw = spi_master_get_devdata(master);
+ struct altera_spi *hw = spi_controller_get_devdata(host);
u32 val;
hw->tx = t->tx_buf;
@@ -175,15 +175,15 @@ static int altera_spi_txrx(struct spi_master *master,
altera_spi_rx_word(hw);
}
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
return 0;
}
irqreturn_t altera_spi_irq(int irq, void *dev)
{
- struct spi_master *master = dev;
- struct altera_spi *hw = spi_master_get_devdata(master);
+ struct spi_controller *host = dev;
+ struct altera_spi *hw = spi_controller_get_devdata(host);
altera_spi_rx_word(hw);
@@ -194,20 +194,20 @@ irqreturn_t altera_spi_irq(int irq, void *dev)
hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK;
altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
}
return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(altera_spi_irq);
-void altera_spi_init_master(struct spi_master *master)
+void altera_spi_init_host(struct spi_controller *host)
{
- struct altera_spi *hw = spi_master_get_devdata(master);
+ struct altera_spi *hw = spi_controller_get_devdata(host);
u32 val;
- master->transfer_one = altera_spi_txrx;
- master->set_cs = altera_spi_set_cs;
+ host->transfer_one = altera_spi_txrx;
+ host->set_cs = altera_spi_set_cs;
/* program defaults into the registers */
hw->imr = 0; /* disable spi interrupts */
@@ -217,6 +217,6 @@ void altera_spi_init_master(struct spi_master *master)
if (val & ALTERA_SPI_STATUS_RRDY_MSK)
altr_spi_readl(hw, ALTERA_SPI_RXDATA, &val); /* flush rxdata */
}
-EXPORT_SYMBOL_GPL(altera_spi_init_master);
+EXPORT_SYMBOL_GPL(altera_spi_init_host);
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-altera-dfl.c b/drivers/spi/spi-altera-dfl.c
index 596e181ae136..5d6e08c12dff 100644
--- a/drivers/spi/spi-altera-dfl.c
+++ b/drivers/spi/spi-altera-dfl.c
@@ -104,20 +104,20 @@ static const struct regmap_config indirect_regbus_cfg = {
.reg_read = indirect_bus_reg_read,
};
-static void config_spi_master(void __iomem *base, struct spi_master *master)
+static void config_spi_host(void __iomem *base, struct spi_controller *host)
{
u64 v;
v = readq(base + SPI_CORE_PARAMETER);
- master->mode_bits = SPI_CS_HIGH;
+ host->mode_bits = SPI_CS_HIGH;
if (FIELD_GET(CLK_POLARITY, v))
- master->mode_bits |= SPI_CPOL;
+ host->mode_bits |= SPI_CPOL;
if (FIELD_GET(CLK_PHASE, v))
- master->mode_bits |= SPI_CPHA;
+ host->mode_bits |= SPI_CPHA;
- master->num_chipselect = FIELD_GET(NUM_CHIPSELECT, v);
- master->bits_per_word_mask =
+ host->num_chipselect = FIELD_GET(NUM_CHIPSELECT, v);
+ host->bits_per_word_mask =
SPI_BPW_RANGE_MASK(1, FIELD_GET(DATA_WIDTH, v));
}
@@ -125,18 +125,18 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
{
struct spi_board_info board_info = { 0 };
struct device *dev = &dfl_dev->dev;
- struct spi_master *master;
+ struct spi_controller *host;
struct altera_spi *hw;
void __iomem *base;
int err;
- master = devm_spi_alloc_master(dev, sizeof(struct altera_spi));
- if (!master)
+ host = devm_spi_alloc_host(dev, sizeof(struct altera_spi));
+ if (!host)
return -ENOMEM;
- master->bus_num = -1;
+ host->bus_num = -1;
- hw = spi_master_get_devdata(master);
+ hw = spi_controller_get_devdata(host);
hw->dev = dev;
@@ -145,10 +145,10 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
if (IS_ERR(base))
return PTR_ERR(base);
- config_spi_master(base, master);
+ config_spi_host(base, host);
dev_dbg(dev, "%s cs %u bpm 0x%x mode 0x%x\n", __func__,
- master->num_chipselect, master->bits_per_word_mask,
- master->mode_bits);
+ host->num_chipselect, host->bits_per_word_mask,
+ host->mode_bits);
hw->regmap = devm_regmap_init(dev, NULL, base, &indirect_regbus_cfg);
if (IS_ERR(hw->regmap))
@@ -156,11 +156,11 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
hw->irq = -EINVAL;
- altera_spi_init_master(master);
+ altera_spi_init_host(host);
- err = devm_spi_register_master(dev, master);
+ err = devm_spi_register_controller(dev, host);
if (err)
- return dev_err_probe(dev, err, "%s failed to register spi master\n",
+ return dev_err_probe(dev, err, "%s failed to register spi host\n",
__func__);
if (dfl_dev->revision == FME_FEATURE_REV_MAX10_SPI_N5010)
@@ -172,7 +172,7 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
board_info.bus_num = 0;
board_info.chip_select = 0;
- if (!spi_new_device(master, &board_info)) {
+ if (!spi_new_device(host, &board_info)) {
dev_err(dev, "%s failed to create SPI device: %s\n",
__func__, board_info.modalias);
}
diff --git a/drivers/spi/spi-altera-platform.c b/drivers/spi/spi-altera-platform.c
index 65147aae82a1..72e7a0f21793 100644
--- a/drivers/spi/spi-altera-platform.c
+++ b/drivers/spi/spi-altera-platform.c
@@ -39,16 +39,16 @@ static int altera_spi_probe(struct platform_device *pdev)
struct altera_spi_platform_data *pdata = dev_get_platdata(&pdev->dev);
enum altera_spi_type type = ALTERA_SPI_TYPE_UNKNOWN;
struct altera_spi *hw;
- struct spi_master *master;
+ struct spi_controller *host;
int err = -ENODEV;
u16 i;
- master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(struct altera_spi));
+ if (!host)
return err;
- /* setup the master state. */
- master->bus_num = -1;
+ /* setup the host state. */
+ host->bus_num = -1;
if (pdata) {
if (pdata->num_chipselect > ALTERA_SPI_MAX_CS) {
@@ -59,18 +59,18 @@ static int altera_spi_probe(struct platform_device *pdev)
goto exit;
}
- master->num_chipselect = pdata->num_chipselect;
- master->mode_bits = pdata->mode_bits;
- master->bits_per_word_mask = pdata->bits_per_word_mask;
+ host->num_chipselect = pdata->num_chipselect;
+ host->mode_bits = pdata->mode_bits;
+ host->bits_per_word_mask = pdata->bits_per_word_mask;
} else {
- master->num_chipselect = 16;
- master->mode_bits = SPI_CS_HIGH;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ host->num_chipselect = 16;
+ host->mode_bits = SPI_CS_HIGH;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
}
- master->dev.of_node = pdev->dev.of_node;
+ host->dev.of_node = pdev->dev.of_node;
- hw = spi_master_get_devdata(master);
+ hw = spi_controller_get_devdata(host);
hw->dev = &pdev->dev;
if (platid)
@@ -107,24 +107,24 @@ static int altera_spi_probe(struct platform_device *pdev)
}
}
- altera_spi_init_master(master);
+ altera_spi_init_host(host);
/* irq is optional */
hw->irq = platform_get_irq(pdev, 0);
if (hw->irq >= 0) {
err = devm_request_irq(&pdev->dev, hw->irq, altera_spi_irq, 0,
- pdev->name, master);
+ pdev->name, host);
if (err)
goto exit;
}
- err = devm_spi_register_master(&pdev->dev, master);
+ err = devm_spi_register_controller(&pdev->dev, host);
if (err)
goto exit;
if (pdata) {
for (i = 0; i < pdata->num_devices; i++) {
- if (!spi_new_device(master, pdata->devices + i))
+ if (!spi_new_device(host, pdata->devices + i))
dev_warn(&pdev->dev,
"unable to create SPI device: %s\n",
pdata->devices[i].modalias);
@@ -135,7 +135,7 @@ static int altera_spi_probe(struct platform_device *pdev)
return 0;
exit:
- spi_master_put(master);
+ spi_controller_put(host);
return err;
}
diff --git a/drivers/spi/spi-ar934x.c b/drivers/spi/spi-ar934x.c
index ec7250c4c810..4a6ecaa0a9c9 100644
--- a/drivers/spi/spi-ar934x.c
+++ b/drivers/spi/spi-ar934x.c
@@ -61,7 +61,7 @@ static inline int ar934x_spi_clk_div(struct ar934x_spi *sp, unsigned int freq)
static int ar934x_spi_setup(struct spi_device *spi)
{
- struct ar934x_spi *sp = spi_controller_get_devdata(spi->master);
+ struct ar934x_spi *sp = spi_controller_get_devdata(spi->controller);
if ((spi->max_speed_hz == 0) ||
(spi->max_speed_hz > (sp->clk_freq / 2))) {
@@ -74,10 +74,10 @@ static int ar934x_spi_setup(struct spi_device *spi)
return 0;
}
-static int ar934x_spi_transfer_one_message(struct spi_controller *master,
+static int ar934x_spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *m)
{
- struct ar934x_spi *sp = spi_controller_get_devdata(master);
+ struct ar934x_spi *sp = spi_controller_get_devdata(ctlr);
struct spi_transfer *t = NULL;
struct spi_device *spi = m->spi;
unsigned long trx_done, trx_cur;
@@ -150,7 +150,7 @@ static int ar934x_spi_transfer_one_message(struct spi_controller *master,
msg_done:
m->status = stat;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(ctlr);
return 0;
}
@@ -183,7 +183,7 @@ static int ar934x_spi_probe(struct platform_device *pdev)
if (ret)
return ret;
- ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*sp));
+ ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*sp));
if (!ctlr) {
dev_info(&pdev->dev, "failed to allocate spi controller\n");
ret = -ENOMEM;
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 9df9fc40b783..4d554b948d71 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -100,7 +100,7 @@
#define A3700_SPI_CLK_CAPT_EDGE BIT(7)
struct a3700_spi {
- struct spi_master *master;
+ struct spi_controller *host;
void __iomem *base;
struct clk *clk;
unsigned int irq;
@@ -174,7 +174,7 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
val |= A3700_SPI_ADDR_PIN;
break;
default:
- dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode);
+ dev_err(&a3700_spi->host->dev, "wrong pin mode %u", pin_mode);
return -EINVAL;
}
@@ -278,7 +278,7 @@ static int a3700_spi_fifo_flush(struct a3700_spi *a3700_spi)
static void a3700_spi_init(struct a3700_spi *a3700_spi)
{
- struct spi_master *master = a3700_spi->master;
+ struct spi_controller *host = a3700_spi->host;
u32 val;
int i;
@@ -295,14 +295,14 @@ static void a3700_spi_init(struct a3700_spi *a3700_spi)
/* Disable AUTO_CS and deactivate all chip-selects */
a3700_spi_auto_cs_unset(a3700_spi);
- for (i = 0; i < master->num_chipselect; i++)
+ for (i = 0; i < host->num_chipselect; i++)
a3700_spi_deactivate_cs(a3700_spi, i);
/* Enable FIFO mode */
a3700_spi_fifo_mode_set(a3700_spi, true);
/* Set SPI mode */
- a3700_spi_mode_set(a3700_spi, master->mode_bits);
+ a3700_spi_mode_set(a3700_spi, host->mode_bits);
/* Reset counters */
spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0);
@@ -315,11 +315,11 @@ static void a3700_spi_init(struct a3700_spi *a3700_spi)
static irqreturn_t a3700_spi_interrupt(int irq, void *dev_id)
{
- struct spi_master *master = dev_id;
+ struct spi_controller *host = dev_id;
struct a3700_spi *a3700_spi;
u32 cause;
- a3700_spi = spi_master_get_devdata(master);
+ a3700_spi = spi_controller_get_devdata(host);
/* Get interrupt causes */
cause = spireg_read(a3700_spi, A3700_SPI_INT_STAT_REG);
@@ -344,7 +344,7 @@ static bool a3700_spi_wait_completion(struct spi_device *spi)
unsigned int ctrl_reg;
unsigned long timeout_jiffies;
- a3700_spi = spi_master_get_devdata(spi->master);
+ a3700_spi = spi_controller_get_devdata(spi->controller);
/* SPI interrupt is edge-triggered, which means an interrupt will
* be generated only when detecting a specific status bit changed
@@ -393,7 +393,7 @@ static bool a3700_spi_transfer_wait(struct spi_device *spi,
{
struct a3700_spi *a3700_spi;
- a3700_spi = spi_master_get_devdata(spi->master);
+ a3700_spi = spi_controller_get_devdata(spi->controller);
a3700_spi->wait_mask = bit_mask;
return a3700_spi_wait_completion(spi);
@@ -417,7 +417,7 @@ static void a3700_spi_transfer_setup(struct spi_device *spi,
{
struct a3700_spi *a3700_spi;
- a3700_spi = spi_master_get_devdata(spi->master);
+ a3700_spi = spi_controller_get_devdata(spi->controller);
a3700_spi_clock_set(a3700_spi, xfer->speed_hz);
@@ -434,7 +434,7 @@ static void a3700_spi_transfer_setup(struct spi_device *spi,
static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
{
- struct a3700_spi *a3700_spi = spi_master_get_devdata(spi->master);
+ struct a3700_spi *a3700_spi = spi_controller_get_devdata(spi->controller);
if (!enable)
a3700_spi_activate_cs(a3700_spi, spi->chip_select);
@@ -565,10 +565,10 @@ static void a3700_spi_transfer_abort_fifo(struct a3700_spi *a3700_spi)
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
-static int a3700_spi_prepare_message(struct spi_master *master,
+static int a3700_spi_prepare_message(struct spi_controller *host,
struct spi_message *message)
{
- struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ struct a3700_spi *a3700_spi = spi_controller_get_devdata(host);
struct spi_device *spi = message->spi;
int ret;
@@ -588,11 +588,11 @@ static int a3700_spi_prepare_message(struct spi_master *master,
return 0;
}
-static int a3700_spi_transfer_one_fifo(struct spi_master *master,
+static int a3700_spi_transfer_one_fifo(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ struct a3700_spi *a3700_spi = spi_controller_get_devdata(host);
int ret = 0, timeout = A3700_SPI_TIMEOUT;
unsigned int nbits = 0, byte_len;
u32 val;
@@ -732,16 +732,16 @@ static int a3700_spi_transfer_one_fifo(struct spi_master *master,
error:
a3700_spi_transfer_abort_fifo(a3700_spi);
out:
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
return ret;
}
-static int a3700_spi_transfer_one_full_duplex(struct spi_master *master,
+static int a3700_spi_transfer_one_full_duplex(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ struct a3700_spi *a3700_spi = spi_controller_get_devdata(host);
u32 val;
/* Disable FIFO mode */
@@ -777,27 +777,27 @@ static int a3700_spi_transfer_one_full_duplex(struct spi_master *master,
}
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
return 0;
}
-static int a3700_spi_transfer_one(struct spi_master *master,
+static int a3700_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
a3700_spi_transfer_setup(spi, xfer);
if (xfer->tx_buf && xfer->rx_buf)
- return a3700_spi_transfer_one_full_duplex(master, spi, xfer);
+ return a3700_spi_transfer_one_full_duplex(host, spi, xfer);
- return a3700_spi_transfer_one_fifo(master, spi, xfer);
+ return a3700_spi_transfer_one_fifo(host, spi, xfer);
}
-static int a3700_spi_unprepare_message(struct spi_master *master,
+static int a3700_spi_unprepare_message(struct spi_controller *host,
struct spi_message *message)
{
- struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ struct a3700_spi *a3700_spi = spi_controller_get_devdata(host);
clk_disable(a3700_spi->clk);
@@ -815,14 +815,14 @@ static int a3700_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *of_node = dev->of_node;
- struct spi_master *master;
+ struct spi_controller *host;
struct a3700_spi *spi;
u32 num_cs = 0;
int irq, ret = 0;
- master = spi_alloc_master(dev, sizeof(*spi));
- if (!master) {
- dev_err(dev, "master allocation failed\n");
+ host = spi_alloc_host(dev, sizeof(*spi));
+ if (!host) {
+ dev_err(dev, "host allocation failed\n");
ret = -ENOMEM;
goto out;
}
@@ -833,23 +833,23 @@ static int a3700_spi_probe(struct platform_device *pdev)
goto error;
}
- master->bus_num = pdev->id;
- master->dev.of_node = of_node;
- master->mode_bits = SPI_MODE_3;
- master->num_chipselect = num_cs;
- master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(32);
- master->prepare_message = a3700_spi_prepare_message;
- master->transfer_one = a3700_spi_transfer_one;
- master->unprepare_message = a3700_spi_unprepare_message;
- master->set_cs = a3700_spi_set_cs;
- master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
+ host->bus_num = pdev->id;
+ host->dev.of_node = of_node;
+ host->mode_bits = SPI_MODE_3;
+ host->num_chipselect = num_cs;
+ host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(32);
+ host->prepare_message = a3700_spi_prepare_message;
+ host->transfer_one = a3700_spi_transfer_one;
+ host->unprepare_message = a3700_spi_unprepare_message;
+ host->set_cs = a3700_spi_set_cs;
+ host->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD);
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- spi = spi_master_get_devdata(master);
+ spi = spi_controller_get_devdata(host);
- spi->master = master;
+ spi->host = host;
spi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi->base)) {
@@ -878,23 +878,23 @@ static int a3700_spi_probe(struct platform_device *pdev)
goto error;
}
- master->max_speed_hz = min_t(unsigned long, A3700_SPI_MAX_SPEED_HZ,
+ host->max_speed_hz = min_t(unsigned long, A3700_SPI_MAX_SPEED_HZ,
clk_get_rate(spi->clk));
- master->min_speed_hz = DIV_ROUND_UP(clk_get_rate(spi->clk),
+ host->min_speed_hz = DIV_ROUND_UP(clk_get_rate(spi->clk),
A3700_SPI_MAX_PRESCALE);
a3700_spi_init(spi);
ret = devm_request_irq(dev, spi->irq, a3700_spi_interrupt, 0,
- dev_name(dev), master);
+ dev_name(dev), host);
if (ret) {
dev_err(dev, "could not request IRQ: %d\n", ret);
goto error_clk;
}
- ret = devm_spi_register_master(dev, master);
+ ret = devm_spi_register_controller(dev, host);
if (ret) {
- dev_err(dev, "Failed to register master\n");
+ dev_err(dev, "Failed to register host\n");
goto error_clk;
}
@@ -903,15 +903,15 @@ static int a3700_spi_probe(struct platform_device *pdev)
error_clk:
clk_unprepare(spi->clk);
error:
- spi_master_put(master);
+ spi_controller_put(host);
out:
return ret;
}
static int a3700_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct a3700_spi *spi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct a3700_spi *spi = spi_controller_get_devdata(host);
clk_unprepare(spi->clk);
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
index 9cd738682aab..fab9d223e24a 100644
--- a/drivers/spi/spi-at91-usart.c
+++ b/drivers/spi/spi-at91-usart.c
@@ -38,7 +38,7 @@
#define US_CR_TXEN BIT(6)
#define US_CR_TXDIS BIT(7)
-#define US_MR_SPI_MASTER 0x0E
+#define US_MR_SPI_HOST 0x0E
#define US_MR_CHRL GENMASK(7, 6)
#define US_MR_CPHA BIT(8)
#define US_MR_CPOL BIT(16)
@@ -61,7 +61,7 @@
#define US_OVRE_RXRDY_IRQS (US_IR_OVRE | US_IR_RXRDY)
#define US_INIT \
- (US_MR_SPI_MASTER | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT)
+ (US_MR_SPI_HOST | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT)
#define US_DMA_MIN_BYTES 16
#define US_DMA_TIMEOUT (msecs_to_jiffies(1000))
@@ -104,7 +104,7 @@ struct at91_usart_spi {
static void dma_callback(void *data)
{
struct spi_controller *ctlr = data;
- struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
aus->current_rx_remaining_bytes = 0;
@@ -115,7 +115,7 @@ static bool at91_usart_spi_can_dma(struct spi_controller *ctrl,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctrl);
return aus->use_dma && xfer->len >= US_DMA_MIN_BYTES;
}
@@ -216,7 +216,7 @@ static void at91_usart_spi_stop_dma(struct spi_controller *ctlr)
static int at91_usart_spi_dma_transfer(struct spi_controller *ctlr,
struct spi_transfer *xfer)
{
- struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
struct dma_chan *rxchan = ctlr->dma_rx;
struct dma_chan *txchan = ctlr->dma_tx;
struct dma_async_tx_descriptor *rxdesc;
@@ -334,7 +334,7 @@ at91_usart_spi_set_xfer_speed(struct at91_usart_spi *aus,
static irqreturn_t at91_usart_spi_interrupt(int irq, void *dev_id)
{
struct spi_controller *controller = dev_id;
- struct at91_usart_spi *aus = spi_master_get_devdata(controller);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(controller);
spin_lock(&aus->lock);
at91_usart_spi_read_status(aus);
@@ -359,7 +359,7 @@ static irqreturn_t at91_usart_spi_interrupt(int irq, void *dev_id)
static int at91_usart_spi_setup(struct spi_device *spi)
{
- struct at91_usart_spi *aus = spi_master_get_devdata(spi->controller);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(spi->controller);
u32 *ausd = spi->controller_state;
unsigned int mr = at91_usart_spi_readl(aus, MR);
@@ -399,7 +399,7 @@ static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
unsigned long dma_timeout = 0;
int ret = 0;
@@ -444,7 +444,7 @@ static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
static int at91_usart_spi_prepare_message(struct spi_controller *ctlr,
struct spi_message *message)
{
- struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
struct spi_device *spi = message->spi;
u32 *ausd = spi->controller_state;
@@ -458,7 +458,7 @@ static int at91_usart_spi_prepare_message(struct spi_controller *ctlr,
static int at91_usart_spi_unprepare_message(struct spi_controller *ctlr,
struct spi_message *message)
{
- struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
at91_usart_spi_writel(aus, IDR, US_OVRE_RXRDY_IRQS);
@@ -515,7 +515,7 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
return PTR_ERR(clk);
ret = -ENOMEM;
- controller = spi_alloc_master(&pdev->dev, sizeof(*aus));
+ controller = spi_alloc_host(&pdev->dev, sizeof(*aus));
if (!controller)
goto at91_usart_spi_probe_fail;
@@ -539,7 +539,7 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
US_MAX_CLK_DIV);
platform_set_drvdata(pdev, controller);
- aus = spi_master_get_devdata(controller);
+ aus = spi_controller_get_devdata(controller);
aus->dev = &pdev->dev;
aus->regs = devm_ioremap_resource(&pdev->dev, regs);
@@ -574,9 +574,9 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
spin_lock_init(&aus->lock);
init_completion(&aus->xfer_completion);
- ret = devm_spi_register_master(&pdev->dev, controller);
+ ret = devm_spi_register_controller(&pdev->dev, controller);
if (ret)
- goto at91_usart_fail_register_master;
+ goto at91_usart_fail_register_controller;
dev_info(&pdev->dev,
"AT91 USART SPI Controller version 0x%x at %pa (irq %d)\n",
@@ -585,19 +585,19 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
return 0;
-at91_usart_fail_register_master:
+at91_usart_fail_register_controller:
at91_usart_spi_release_dma(controller);
at91_usart_fail_dma:
clk_disable_unprepare(clk);
at91_usart_spi_probe_fail:
- spi_master_put(controller);
+ spi_controller_put(controller);
return ret;
}
__maybe_unused static int at91_usart_spi_runtime_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
- struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
clk_disable_unprepare(aus->clk);
pinctrl_pm_select_sleep_state(dev);
@@ -608,7 +608,7 @@ __maybe_unused static int at91_usart_spi_runtime_suspend(struct device *dev)
__maybe_unused static int at91_usart_spi_runtime_resume(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
- struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctrl);
pinctrl_pm_select_default_state(dev);
@@ -633,7 +633,7 @@ __maybe_unused static int at91_usart_spi_suspend(struct device *dev)
__maybe_unused static int at91_usart_spi_resume(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
- struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctrl);
int ret;
if (!pm_runtime_suspended(dev)) {
@@ -650,7 +650,7 @@ __maybe_unused static int at91_usart_spi_resume(struct device *dev)
static int at91_usart_spi_remove(struct platform_device *pdev)
{
struct spi_controller *ctlr = platform_get_drvdata(pdev);
- struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
at91_usart_spi_release_dma(ctlr);
clk_disable_unprepare(aus->clk);
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 607e7a49fb89..795e88dbef1b 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -58,7 +58,7 @@ static inline void ath79_spi_wr(struct ath79_spi *sp, unsigned int reg, u32 val)
static inline struct ath79_spi *ath79_spidev_to_sp(struct spi_device *spi)
{
- return spi_master_get_devdata(spi->master);
+ return spi_controller_get_devdata(spi->controller);
}
static inline void ath79_spi_delay(struct ath79_spi *sp, unsigned int nsecs)
@@ -120,7 +120,7 @@ static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned int nsecs,
else
out = ioc & ~AR71XX_SPI_IOC_DO;
- /* setup MSB (to slave) on trailing edge */
+ /* setup MSB (to target) on trailing edge */
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
ath79_spi_delay(sp, nsecs);
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out | AR71XX_SPI_IOC_CLK);
@@ -168,28 +168,28 @@ static const struct spi_controller_mem_ops ath79_mem_ops = {
static int ath79_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct ath79_spi *sp;
unsigned long rate;
int ret;
- master = spi_alloc_master(&pdev->dev, sizeof(*sp));
- if (master == NULL) {
- dev_err(&pdev->dev, "failed to allocate spi master\n");
+ host = spi_alloc_host(&pdev->dev, sizeof(*sp));
+ if (host == NULL) {
+ dev_err(&pdev->dev, "failed to allocate spi host\n");
return -ENOMEM;
}
- sp = spi_master_get_devdata(master);
- master->dev.of_node = pdev->dev.of_node;
+ sp = spi_controller_get_devdata(host);
+ host->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, sp);
- master->use_gpio_descriptors = true;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
- master->flags = SPI_MASTER_GPIO_SS;
- master->num_chipselect = 3;
- master->mem_ops = &ath79_mem_ops;
+ host->use_gpio_descriptors = true;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+ host->flags = SPI_MASTER_GPIO_SS;
+ host->num_chipselect = 3;
+ host->mem_ops = &ath79_mem_ops;
- sp->bitbang.master = master;
+ sp->bitbang.master = host;
sp->bitbang.chipselect = ath79_spi_chipselect;
sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
sp->bitbang.flags = SPI_CS_HIGH;
@@ -197,18 +197,18 @@ static int ath79_spi_probe(struct platform_device *pdev)
sp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sp->base)) {
ret = PTR_ERR(sp->base);
- goto err_put_master;
+ goto err_put_host;
}
sp->clk = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(sp->clk)) {
ret = PTR_ERR(sp->clk);
- goto err_put_master;
+ goto err_put_host;
}
ret = clk_prepare_enable(sp->clk);
if (ret)
- goto err_put_master;
+ goto err_put_host;
rate = DIV_ROUND_UP(clk_get_rate(sp->clk), MHZ);
if (!rate) {
@@ -231,8 +231,8 @@ err_disable:
ath79_spi_disable(sp);
err_clk_disable:
clk_disable_unprepare(sp->clk);
-err_put_master:
- spi_master_put(sp->bitbang.master);
+err_put_host:
+ spi_controller_put(host);
return ret;
}
@@ -244,7 +244,7 @@ static int ath79_spi_remove(struct platform_device *pdev)
spi_bitbang_stop(&sp->bitbang);
ath79_spi_disable(sp);
clk_disable_unprepare(sp->clk);
- spi_master_put(sp->bitbang.master);
+ spi_controller_put(sp->bitbang.master);
return 0;
}
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index c4f22d50dba5..5c5678f065f3 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -358,7 +358,7 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
u32 csr;
/* Make sure clock polarity is correct */
- for (i = 0; i < spi->master->num_chipselect; i++) {
+ for (i = 0; i < spi->controller->num_chipselect; i++) {
csr = spi_readl(as, CSR0 + 4 * i);
if ((csr ^ cpol) & SPI_BIT(CPOL))
spi_writel(as, CSR0 + 4 * i,
@@ -419,11 +419,11 @@ static inline bool atmel_spi_use_dma(struct atmel_spi *as,
return as->use_dma && xfer->len >= DMA_MIN_BYTES;
}
-static bool atmel_spi_can_dma(struct spi_master *master,
+static bool atmel_spi_can_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5))
return atmel_spi_use_dma(as, xfer) &&
@@ -435,7 +435,7 @@ static bool atmel_spi_can_dma(struct spi_master *master,
static int atmel_spi_dma_slave_config(struct atmel_spi *as, u8 bits_per_word)
{
- struct spi_master *master = platform_get_drvdata(as->pdev);
+ struct spi_controller *host = platform_get_drvdata(as->pdev);
struct dma_slave_config slave_config;
int err = 0;
@@ -467,21 +467,21 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as, u8 bits_per_word)
* So we'd rather write only one data at the time. Hence the transmit
* path works the same whether FIFOs are available (and enabled) or not.
*/
- if (dmaengine_slave_config(master->dma_tx, &slave_config)) {
+ if (dmaengine_slave_config(host->dma_tx, &slave_config)) {
dev_err(&as->pdev->dev,
"failed to configure tx dma channel\n");
err = -EINVAL;
}
/*
- * This driver configures the spi controller for master mode (MSTR bit
+ * This driver configures the spi controller for host mode (MSTR bit
* set to '1' in the Mode Register).
* So according to the datasheet, when FIFOs are available (and
* enabled), the Receive FIFO operates in Single Data Mode.
* So the receive path works the same whether FIFOs are available (and
* enabled) or not.
*/
- if (dmaengine_slave_config(master->dma_rx, &slave_config)) {
+ if (dmaengine_slave_config(host->dma_rx, &slave_config)) {
dev_err(&as->pdev->dev,
"failed to configure rx dma channel\n");
err = -EINVAL;
@@ -490,22 +490,22 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as, u8 bits_per_word)
return err;
}
-static int atmel_spi_configure_dma(struct spi_master *master,
+static int atmel_spi_configure_dma(struct spi_controller *host,
struct atmel_spi *as)
{
struct device *dev = &as->pdev->dev;
int err;
- master->dma_tx = dma_request_chan(dev, "tx");
- if (IS_ERR(master->dma_tx)) {
- err = PTR_ERR(master->dma_tx);
+ host->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(host->dma_tx)) {
+ err = PTR_ERR(host->dma_tx);
dev_dbg(dev, "No TX DMA channel, DMA is disabled\n");
goto error_clear;
}
- master->dma_rx = dma_request_chan(dev, "rx");
- if (IS_ERR(master->dma_rx)) {
- err = PTR_ERR(master->dma_rx);
+ host->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(host->dma_rx)) {
+ err = PTR_ERR(host->dma_rx);
/*
* No reason to check EPROBE_DEFER here since we have already
* requested tx channel.
@@ -520,45 +520,45 @@ static int atmel_spi_configure_dma(struct spi_master *master,
dev_info(&as->pdev->dev,
"Using %s (tx) and %s (rx) for DMA transfers\n",
- dma_chan_name(master->dma_tx),
- dma_chan_name(master->dma_rx));
+ dma_chan_name(host->dma_tx),
+ dma_chan_name(host->dma_rx));
return 0;
error:
- if (!IS_ERR(master->dma_rx))
- dma_release_channel(master->dma_rx);
- if (!IS_ERR(master->dma_tx))
- dma_release_channel(master->dma_tx);
+ if (!IS_ERR(host->dma_rx))
+ dma_release_channel(host->dma_rx);
+ if (!IS_ERR(host->dma_tx))
+ dma_release_channel(host->dma_tx);
error_clear:
- master->dma_tx = master->dma_rx = NULL;
+ host->dma_tx = host->dma_rx = NULL;
return err;
}
-static void atmel_spi_stop_dma(struct spi_master *master)
+static void atmel_spi_stop_dma(struct spi_controller *host)
{
- if (master->dma_rx)
- dmaengine_terminate_all(master->dma_rx);
- if (master->dma_tx)
- dmaengine_terminate_all(master->dma_tx);
+ if (host->dma_rx)
+ dmaengine_terminate_all(host->dma_rx);
+ if (host->dma_tx)
+ dmaengine_terminate_all(host->dma_tx);
}
-static void atmel_spi_release_dma(struct spi_master *master)
+static void atmel_spi_release_dma(struct spi_controller *host)
{
- if (master->dma_rx) {
- dma_release_channel(master->dma_rx);
- master->dma_rx = NULL;
+ if (host->dma_rx) {
+ dma_release_channel(host->dma_rx);
+ host->dma_rx = NULL;
}
- if (master->dma_tx) {
- dma_release_channel(master->dma_tx);
- master->dma_tx = NULL;
+ if (host->dma_tx) {
+ dma_release_channel(host->dma_tx);
+ host->dma_tx = NULL;
}
}
/* This function is called by the DMA driver from tasklet context */
static void dma_callback(void *data)
{
- struct spi_master *master = data;
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_controller *host = data;
+ struct atmel_spi *as = spi_controller_get_devdata(host);
if (is_vmalloc_addr(as->current_transfer->rx_buf) &&
IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
@@ -571,13 +571,13 @@ static void dma_callback(void *data)
/*
* Next transfer using PIO without FIFO.
*/
-static void atmel_spi_next_xfer_single(struct spi_master *master,
+static void atmel_spi_next_xfer_single(struct spi_controller *host,
struct spi_transfer *xfer)
{
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
- dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");
+ dev_vdbg(host->dev.parent, "atmel_spi_next_xfer_pio\n");
/* Make sure data is not remaining in RDR */
spi_readl(as, RDR);
@@ -591,7 +591,7 @@ static void atmel_spi_next_xfer_single(struct spi_master *master,
else
spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
- dev_dbg(master->dev.parent,
+ dev_dbg(host->dev.parent,
" start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
xfer->bits_per_word);
@@ -603,10 +603,10 @@ static void atmel_spi_next_xfer_single(struct spi_master *master,
/*
* Next transfer using PIO with FIFO.
*/
-static void atmel_spi_next_xfer_fifo(struct spi_master *master,
+static void atmel_spi_next_xfer_fifo(struct spi_controller *host,
struct spi_transfer *xfer)
{
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
u32 current_remaining_data, num_data;
u32 offset = xfer->len - as->current_remaining_bytes;
const u16 *words = (const u16 *)((u8 *)xfer->tx_buf + offset);
@@ -614,7 +614,7 @@ static void atmel_spi_next_xfer_fifo(struct spi_master *master,
u16 td0, td1;
u32 fifomr;
- dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_fifo\n");
+ dev_vdbg(host->dev.parent, "atmel_spi_next_xfer_fifo\n");
/* Compute the number of data to transfer in the current iteration */
current_remaining_data = ((xfer->bits_per_word > 8) ?
@@ -658,7 +658,7 @@ static void atmel_spi_next_xfer_fifo(struct spi_master *master,
num_data--;
}
- dev_dbg(master->dev.parent,
+ dev_dbg(host->dev.parent,
" start fifo xfer %p: len %u tx %p rx %p bitpw %d\n",
xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
xfer->bits_per_word);
@@ -673,32 +673,32 @@ static void atmel_spi_next_xfer_fifo(struct spi_master *master,
/*
* Next transfer using PIO.
*/
-static void atmel_spi_next_xfer_pio(struct spi_master *master,
+static void atmel_spi_next_xfer_pio(struct spi_controller *host,
struct spi_transfer *xfer)
{
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
if (as->fifo_size)
- atmel_spi_next_xfer_fifo(master, xfer);
+ atmel_spi_next_xfer_fifo(host, xfer);
else
- atmel_spi_next_xfer_single(master, xfer);
+ atmel_spi_next_xfer_single(host, xfer);
}
/*
* Submit next transfer for DMA.
*/
-static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
+static int atmel_spi_next_xfer_dma_submit(struct spi_controller *host,
struct spi_transfer *xfer,
u32 *plen)
{
- struct atmel_spi *as = spi_master_get_devdata(master);
- struct dma_chan *rxchan = master->dma_rx;
- struct dma_chan *txchan = master->dma_tx;
+ struct atmel_spi *as = spi_controller_get_devdata(host);
+ struct dma_chan *rxchan = host->dma_rx;
+ struct dma_chan *txchan = host->dma_tx;
struct dma_async_tx_descriptor *rxdesc;
struct dma_async_tx_descriptor *txdesc;
dma_cookie_t cookie;
- dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");
+ dev_vdbg(host->dev.parent, "atmel_spi_next_xfer_dma_submit\n");
/* Check that the channels are available */
if (!rxchan || !txchan)
@@ -749,7 +749,7 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
if (!txdesc)
goto err_dma;
- dev_dbg(master->dev.parent,
+ dev_dbg(host->dev.parent,
" start dma xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
xfer, xfer->len, xfer->tx_buf, (unsigned long long)xfer->tx_dma,
xfer->rx_buf, (unsigned long long)xfer->rx_dma);
@@ -759,7 +759,7 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
/* Put the callback on the RX transfer only, that should finish last */
rxdesc->callback = dma_callback;
- rxdesc->callback_param = master;
+ rxdesc->callback_param = host;
/* Submit and fire RX and TX with TX last so we're ready to read! */
cookie = rxdesc->tx_submit(rxdesc);
@@ -775,12 +775,12 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
err_dma:
spi_writel(as, IDR, SPI_BIT(OVRES));
- atmel_spi_stop_dma(master);
+ atmel_spi_stop_dma(host);
err_exit:
return -ENOMEM;
}
-static void atmel_spi_next_xfer_data(struct spi_master *master,
+static void atmel_spi_next_xfer_data(struct spi_controller *host,
struct spi_transfer *xfer,
dma_addr_t *tx_dma,
dma_addr_t *rx_dma,
@@ -788,8 +788,8 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
{
*rx_dma = xfer->rx_dma + xfer->len - *plen;
*tx_dma = xfer->tx_dma + xfer->len - *plen;
- if (*plen > master->max_dma_len)
- *plen = master->max_dma_len;
+ if (*plen > host->max_dma_len)
+ *plen = host->max_dma_len;
}
static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
@@ -844,17 +844,17 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
* Submit next transfer for PDC.
* lock is held, spi irq is blocked
*/
-static void atmel_spi_pdc_next_xfer(struct spi_master *master,
+static void atmel_spi_pdc_next_xfer(struct spi_controller *host,
struct spi_transfer *xfer)
{
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
u32 len;
dma_addr_t tx_dma, rx_dma;
spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
len = as->current_remaining_bytes;
- atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+ atmel_spi_next_xfer_data(host, xfer, &tx_dma, &rx_dma, &len);
as->current_remaining_bytes -= len;
spi_writel(as, RPR, rx_dma);
@@ -865,7 +865,7 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
spi_writel(as, RCR, len);
spi_writel(as, TCR, len);
- dev_dbg(&master->dev,
+ dev_dbg(&host->dev,
" start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
xfer, xfer->len, xfer->tx_buf,
(unsigned long long)xfer->tx_dma, xfer->rx_buf,
@@ -873,7 +873,7 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
if (as->current_remaining_bytes) {
len = as->current_remaining_bytes;
- atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+ atmel_spi_next_xfer_data(host, xfer, &tx_dma, &rx_dma, &len);
as->current_remaining_bytes -= len;
spi_writel(as, RNPR, rx_dma);
@@ -884,7 +884,7 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
spi_writel(as, RNCR, len);
spi_writel(as, TNCR, len);
- dev_dbg(&master->dev,
+ dev_dbg(&host->dev,
" next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
xfer, xfer->len, xfer->tx_buf,
(unsigned long long)xfer->tx_dma, xfer->rx_buf,
@@ -944,14 +944,14 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
return 0;
}
-static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
+static void atmel_spi_dma_unmap_xfer(struct spi_controller *host,
struct spi_transfer *xfer)
{
if (xfer->tx_dma != INVALID_DMA_ADDRESS)
- dma_unmap_single(master->dev.parent, xfer->tx_dma,
+ dma_unmap_single(host->dev.parent, xfer->tx_dma,
xfer->len, DMA_TO_DEVICE);
if (xfer->rx_dma != INVALID_DMA_ADDRESS)
- dma_unmap_single(master->dev.parent, xfer->rx_dma,
+ dma_unmap_single(host->dev.parent, xfer->rx_dma,
xfer->len, DMA_FROM_DEVICE);
}
@@ -1039,8 +1039,8 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
static irqreturn_t
atmel_spi_pio_interrupt(int irq, void *dev_id)
{
- struct spi_master *master = dev_id;
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_id;
+ struct atmel_spi *as = spi_controller_get_devdata(host);
u32 status, pending, imr;
struct spi_transfer *xfer;
int ret = IRQ_NONE;
@@ -1052,7 +1052,7 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
if (pending & SPI_BIT(OVRES)) {
ret = IRQ_HANDLED;
spi_writel(as, IDR, SPI_BIT(OVRES));
- dev_warn(master->dev.parent, "overrun\n");
+ dev_warn(host->dev.parent, "overrun\n");
/*
* When we get an overrun, we disregard the current
@@ -1097,8 +1097,8 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
static irqreturn_t
atmel_spi_pdc_interrupt(int irq, void *dev_id)
{
- struct spi_master *master = dev_id;
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_id;
+ struct atmel_spi *as = spi_controller_get_devdata(host);
u32 status, pending, imr;
int ret = IRQ_NONE;
@@ -1152,12 +1152,12 @@ static int atmel_word_delay_csr(struct spi_device *spi, struct atmel_spi *as)
static void initialize_native_cs_for_gpio(struct atmel_spi *as)
{
int i;
- struct spi_master *master = platform_get_drvdata(as->pdev);
+ struct spi_controller *host = platform_get_drvdata(as->pdev);
if (!as->native_cs_free)
return; /* already initialized */
- if (!master->cs_gpiods)
+ if (!host->cs_gpiods)
return; /* No CS GPIO */
/*
@@ -1170,7 +1170,7 @@ static void initialize_native_cs_for_gpio(struct atmel_spi *as)
i = 1;
for (; i < 4; i++)
- if (master->cs_gpiods[i])
+ if (host->cs_gpiods[i])
as->native_cs_free |= BIT(i);
if (as->native_cs_free)
@@ -1186,7 +1186,7 @@ static int atmel_spi_setup(struct spi_device *spi)
int chip_select;
int word_delay_csr;
- as = spi_master_get_devdata(spi->master);
+ as = spi_controller_get_devdata(spi->controller);
/* see notes above re chipselect */
if (!spi->cs_gpiod && (spi->mode & SPI_CS_HIGH)) {
@@ -1254,7 +1254,7 @@ static int atmel_spi_setup(struct spi_device *spi)
static void atmel_spi_set_cs(struct spi_device *spi, bool enable)
{
- struct atmel_spi *as = spi_master_get_devdata(spi->master);
+ struct atmel_spi *as = spi_controller_get_devdata(spi->controller);
/* the core doesn't really pass us enable/disable, but CS HIGH vs CS LOW
* since we already have routines for activate/deactivate translate
* high/low to active/inactive
@@ -1269,7 +1269,7 @@ static void atmel_spi_set_cs(struct spi_device *spi, bool enable)
}
-static int atmel_spi_one_transfer(struct spi_master *master,
+static int atmel_spi_one_transfer(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
@@ -1281,7 +1281,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
int ret;
unsigned long dma_timeout;
- as = spi_master_get_devdata(master);
+ as = spi_controller_get_devdata(host);
asd = spi->controller_state;
bits = (asd->csr >> 4) & 0xf;
@@ -1295,7 +1295,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
* DMA map early, for performance (empties dcache ASAP) and
* better fault reporting.
*/
- if ((!master->cur_msg->is_dma_mapped)
+ if ((!host->cur_msg->is_dma_mapped)
&& as->use_pdc) {
if (atmel_spi_dma_map_xfer(as, xfer) < 0)
return -ENOMEM;
@@ -1311,11 +1311,11 @@ static int atmel_spi_one_transfer(struct spi_master *master,
if (as->use_pdc) {
atmel_spi_lock(as);
- atmel_spi_pdc_next_xfer(master, xfer);
+ atmel_spi_pdc_next_xfer(host, xfer);
atmel_spi_unlock(as);
} else if (atmel_spi_use_dma(as, xfer)) {
len = as->current_remaining_bytes;
- ret = atmel_spi_next_xfer_dma_submit(master,
+ ret = atmel_spi_next_xfer_dma_submit(host,
xfer, &len);
if (ret) {
dev_err(&spi->dev,
@@ -1329,7 +1329,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
}
} else {
atmel_spi_lock(as);
- atmel_spi_next_xfer_pio(master, xfer);
+ atmel_spi_next_xfer_pio(host, xfer);
atmel_spi_unlock(as);
}
@@ -1346,7 +1346,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
if (as->done_status) {
if (as->use_pdc) {
- dev_warn(master->dev.parent,
+ dev_warn(host->dev.parent,
"overrun (%u/%u remaining)\n",
spi_readl(as, TCR), spi_readl(as, RCR));
@@ -1362,7 +1362,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
break;
if (!timeout)
- dev_warn(master->dev.parent,
+ dev_warn(host->dev.parent,
"timeout waiting for TXEMPTY");
while (spi_readl(as, SR) & SPI_BIT(RDRF))
spi_readl(as, RDR);
@@ -1371,13 +1371,13 @@ static int atmel_spi_one_transfer(struct spi_master *master,
spi_readl(as, SR);
} else if (atmel_spi_use_dma(as, xfer)) {
- atmel_spi_stop_dma(master);
+ atmel_spi_stop_dma(host);
}
}
- if (!master->cur_msg->is_dma_mapped
+ if (!host->cur_msg->is_dma_mapped
&& as->use_pdc)
- atmel_spi_dma_unmap_xfer(master, xfer);
+ atmel_spi_dma_unmap_xfer(host, xfer);
if (as->use_pdc)
atmel_spi_disable_pdc_transfer(as);
@@ -1440,7 +1440,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
int irq;
struct clk *clk;
int ret;
- struct spi_master *master;
+ struct spi_controller *host;
struct atmel_spi *as;
/* Select default pin state */
@@ -1459,29 +1459,29 @@ static int atmel_spi_probe(struct platform_device *pdev)
return PTR_ERR(clk);
/* setup spi core then atmel-specific driver state */
- master = spi_alloc_master(&pdev->dev, sizeof(*as));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*as));
+ if (!host)
return -ENOMEM;
/* the spi->mode bits understood by this driver: */
- master->use_gpio_descriptors = true;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
- master->dev.of_node = pdev->dev.of_node;
- master->bus_num = pdev->id;
- master->num_chipselect = 4;
- master->setup = atmel_spi_setup;
- master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX |
+ host->use_gpio_descriptors = true;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
+ host->dev.of_node = pdev->dev.of_node;
+ host->bus_num = pdev->id;
+ host->num_chipselect = 4;
+ host->setup = atmel_spi_setup;
+ host->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX |
SPI_MASTER_GPIO_SS);
- master->transfer_one = atmel_spi_one_transfer;
- master->set_cs = atmel_spi_set_cs;
- master->cleanup = atmel_spi_cleanup;
- master->auto_runtime_pm = true;
- master->max_dma_len = SPI_MAX_DMA_XFER;
- master->can_dma = atmel_spi_can_dma;
- platform_set_drvdata(pdev, master);
+ host->transfer_one = atmel_spi_one_transfer;
+ host->set_cs = atmel_spi_set_cs;
+ host->cleanup = atmel_spi_cleanup;
+ host->auto_runtime_pm = true;
+ host->max_dma_len = SPI_MAX_DMA_XFER;
+ host->can_dma = atmel_spi_can_dma;
+ platform_set_drvdata(pdev, host);
- as = spi_master_get_devdata(master);
+ as = spi_controller_get_devdata(host);
spin_lock_init(&as->lock);
@@ -1502,7 +1502,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
as->use_dma = false;
as->use_pdc = false;
if (as->caps.has_dma_support) {
- ret = atmel_spi_configure_dma(master, as);
+ ret = atmel_spi_configure_dma(host, as);
if (ret == 0) {
as->use_dma = true;
} else if (ret == -EPROBE_DEFER) {
@@ -1532,7 +1532,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
}
}
if (!as->use_dma)
- dev_info(master->dev.parent,
+ dev_info(host->dev.parent,
" can not allocate dma coherent memory\n");
}
@@ -1541,10 +1541,10 @@ static int atmel_spi_probe(struct platform_device *pdev)
if (as->use_pdc) {
ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pdc_interrupt,
- 0, dev_name(&pdev->dev), master);
+ 0, dev_name(&pdev->dev), host);
} else {
ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pio_interrupt,
- 0, dev_name(&pdev->dev), master);
+ 0, dev_name(&pdev->dev), host);
}
if (ret)
goto out_unmap_regs;
@@ -1569,7 +1569,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret)
goto out_free_dma;
@@ -1585,28 +1585,28 @@ out_free_dma:
pm_runtime_set_suspended(&pdev->dev);
if (as->use_dma)
- atmel_spi_release_dma(master);
+ atmel_spi_release_dma(host);
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
clk_disable_unprepare(clk);
out_free_irq:
out_unmap_regs:
- spi_master_put(master);
+ spi_controller_put(host);
return ret;
}
static int atmel_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
pm_runtime_get_sync(&pdev->dev);
/* reset the hardware and block queue progress */
if (as->use_dma) {
- atmel_spi_stop_dma(master);
- atmel_spi_release_dma(master);
+ atmel_spi_stop_dma(host);
+ atmel_spi_release_dma(host);
if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
as->addr_tx_bbuf,
@@ -1633,8 +1633,8 @@ static int atmel_spi_remove(struct platform_device *pdev)
static int atmel_spi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
clk_disable_unprepare(as->clk);
pinctrl_pm_select_sleep_state(dev);
@@ -1644,8 +1644,8 @@ static int atmel_spi_runtime_suspend(struct device *dev)
static int atmel_spi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
pinctrl_pm_select_default_state(dev);
@@ -1654,11 +1654,11 @@ static int atmel_spi_runtime_resume(struct device *dev)
static int atmel_spi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
int ret;
/* Stop the queue running */
- ret = spi_master_suspend(master);
+ ret = spi_controller_suspend(host);
if (ret)
return ret;
@@ -1670,8 +1670,8 @@ static int atmel_spi_suspend(struct device *dev)
static int atmel_spi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(as->clk);
@@ -1689,7 +1689,7 @@ static int atmel_spi_resume(struct device *dev)
}
/* Start the queue running */
- return spi_master_resume(master);
+ return spi_controller_resume(host);
}
static const struct dev_pm_ops atmel_spi_pm_ops = {
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index b871fd810d80..cd0a6478f5e7 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -20,6 +20,8 @@
#include <linux/spi/spi.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/mtd/spi-nor.h>
#include <linux/reset.h>
#include <linux/pm_runtime.h>
@@ -57,6 +59,7 @@
#define PINGPONG_CMD_SS_SHIFT 12
#define HSSPI_PINGPONG_STATUS_REG(x) (0x84 + (x) * 0x40)
+#define HSSPI_PINGPONG_STATUS_SRC_BUSY BIT(1)
#define HSSPI_PROFILE_CLK_CTRL_REG(x) (0x100 + (x) * 0x20)
#define CLK_CTRL_FREQ_CTRL_MASK 0x0000ffff
@@ -92,15 +95,42 @@
#define HSSPI_MAX_PREPEND_LEN 15
-#define HSSPI_MAX_SYNC_CLOCK 30000000
+/*
+ * Some chips require 30MHz but others require 25MHz. Use the smaller value
+ * to cover both cases.
+ */
+#define HSSPI_MAX_SYNC_CLOCK 25000000
#define HSSPI_SPI_MAX_CS 8
#define HSSPI_BUS_NUM 1 /* 0 is legacy SPI */
+#define HSSPI_POLL_STATUS_TIMEOUT_MS 100
+
+#define HSSPI_WAIT_MODE_POLLING 0
+#define HSSPI_WAIT_MODE_INTR 1
+#define HSSPI_WAIT_MODE_MAX HSSPI_WAIT_MODE_INTR
+
+/*
+ * The default transfer mode is auto. If the msg is prependable, use the
+ * prepend mode. If not, fall back to the dummy cs workaround mode but limit
+ * the clock to 25MHz to make sure it works in all board designs.
+ */
+#define HSSPI_XFER_MODE_AUTO 0
+#define HSSPI_XFER_MODE_PREPEND 1
+#define HSSPI_XFER_MODE_DUMMYCS 2
+#define HSSPI_XFER_MODE_MAX HSSPI_XFER_MODE_DUMMYCS
+
+#define bcm63xx_prepend_printk_on_checkfail(bs, fmt, ...) \
+do { \
+ if (bs->xfer_mode == HSSPI_XFER_MODE_AUTO) \
+ dev_dbg(&bs->pdev->dev, fmt, ##__VA_ARGS__); \
+ else if (bs->xfer_mode == HSSPI_XFER_MODE_PREPEND) \
+ dev_err(&bs->pdev->dev, fmt, ##__VA_ARGS__); \
+} while (0)
struct bcm63xx_hsspi {
struct completion done;
struct mutex bus_mutex;
-
+ struct mutex msg_mutex;
struct platform_device *pdev;
struct clk *clk;
struct clk *pll_clk;
@@ -109,8 +139,289 @@ struct bcm63xx_hsspi {
u32 speed_hz;
u8 cs_polarity;
+ u32 wait_mode;
+ u32 xfer_mode;
+ u32 prepend_cnt;
+ u8 *prepend_buf;
+};
+
+static ssize_t wait_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(ctrl);
+
+ return sprintf(buf, "%d\n", bs->wait_mode);
+}
+
+static ssize_t wait_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(ctrl);
+ u32 val;
+
+ if (kstrtou32(buf, 10, &val))
+ return -EINVAL;
+
+ if (val > HSSPI_WAIT_MODE_MAX) {
+ dev_warn(dev, "invalid wait mode %u\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&bs->msg_mutex);
+ bs->wait_mode = val;
+ /* clear interrupt status to avoid spurious int on next transfer */
+ if (val == HSSPI_WAIT_MODE_INTR)
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+ mutex_unlock(&bs->msg_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(wait_mode);
+
+static ssize_t xfer_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(ctrl);
+
+ return sprintf(buf, "%d\n", bs->xfer_mode);
+}
+
+static ssize_t xfer_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(ctrl);
+ u32 val;
+
+ if (kstrtou32(buf, 10, &val))
+ return -EINVAL;
+
+ if (val > HSSPI_XFER_MODE_MAX) {
+ dev_warn(dev, "invalid xfer mode %u\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&bs->msg_mutex);
+ bs->xfer_mode = val;
+ mutex_unlock(&bs->msg_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(xfer_mode);
+
+static struct attribute *bcm63xx_hsspi_attrs[] = {
+ &dev_attr_wait_mode.attr,
+ &dev_attr_xfer_mode.attr,
+ NULL,
+};
+
+static const struct attribute_group bcm63xx_hsspi_group = {
+ .attrs = bcm63xx_hsspi_attrs,
};
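
The wait_mode and xfer_mode tunables above are plain sysfs device attributes on the HSSPI platform device, so they can be flipped at runtime. A minimal user-space sketch, not part of the patch, assuming a made-up sysfs path (the real one depends on the board's device name):

/* Hypothetical example: switch the controller to interrupt wait mode. */
#include <stdio.h>

int main(void)
{
	/* 0 = polling, 1 = interrupt; mirrors HSSPI_WAIT_MODE_* above */
	FILE *f = fopen("/sys/devices/platform/ff801000.spi/wait_mode", "w");

	if (!f)
		return 1;
	fputs("1\n", f);
	fclose(f);
	return 0;
}
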
+static void bcm63xx_hsspi_set_clk(struct bcm63xx_hsspi *bs,
+ struct spi_device *spi, int hz);
+
+static size_t bcm63xx_hsspi_max_message_size(struct spi_device *spi)
+{
+ return HSSPI_BUFFER_LEN - HSSPI_OPCODE_LEN;
+}
+
+static int bcm63xx_hsspi_wait_cmd(struct bcm63xx_hsspi *bs)
+{
+ unsigned long limit;
+ u32 reg = 0;
+ int rc = 0;
+
+ if (bs->wait_mode == HSSPI_WAIT_MODE_INTR) {
+ if (wait_for_completion_timeout(&bs->done, HZ) == 0)
+ rc = 1;
+ } else {
+ /* polling mode checks for status busy bit */
+ limit = jiffies + msecs_to_jiffies(HSSPI_POLL_STATUS_TIMEOUT_MS);
+
+ while (!time_after(jiffies, limit)) {
+ reg = __raw_readl(bs->regs + HSSPI_PINGPONG_STATUS_REG(0));
+ if (reg & HSSPI_PINGPONG_STATUS_SRC_BUSY)
+ cpu_relax();
+ else
+ break;
+ }
+ if (reg & HSSPI_PINGPONG_STATUS_SRC_BUSY)
+ rc = 1;
+ }
+
+ if (rc)
+ dev_err(&bs->pdev->dev, "transfer timed out!\n");
+
+ return rc;
+}
+
+static bool bcm63xx_prepare_prepend_transfer(struct spi_master *master,
+ struct spi_message *msg,
+ struct spi_transfer *t_prepend)
+{
+
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ bool tx_only = false;
+ struct spi_transfer *t;
+
+ /*
+ * Multiple transfers within a message may be combined into one transfer
+ * to the controller using its prepend feature. A SPI message is prependable
+ * only if the following are all true:
+	 * 1. One or more half duplex write transfers in single bit mode
+ * 2. Optional full duplex read/write at the end
+ * 3. No delay and cs_change between transfers
+ */
+ bs->prepend_cnt = 0;
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ if ((spi_delay_to_ns(&t->delay, t) > 0) || t->cs_change) {
+ bcm63xx_prepend_printk_on_checkfail(bs,
+ "Delay or cs change not supported in prepend mode!\n");
+ return false;
+ }
+
+ tx_only = false;
+ if (t->tx_buf && !t->rx_buf) {
+ tx_only = true;
+ if (bs->prepend_cnt + t->len >
+ (HSSPI_BUFFER_LEN - HSSPI_OPCODE_LEN)) {
+ bcm63xx_prepend_printk_on_checkfail(bs,
+ "exceed max buf len, abort prepending transfers!\n");
+ return false;
+ }
+
+ if (t->tx_nbits > SPI_NBITS_SINGLE &&
+ !list_is_last(&t->transfer_list, &msg->transfers)) {
+ bcm63xx_prepend_printk_on_checkfail(bs,
+ "multi-bit prepend buf not supported!\n");
+ return false;
+ }
+
+ if (t->tx_nbits == SPI_NBITS_SINGLE) {
+ memcpy(bs->prepend_buf + bs->prepend_cnt, t->tx_buf, t->len);
+ bs->prepend_cnt += t->len;
+ }
+ } else {
+ if (!list_is_last(&t->transfer_list, &msg->transfers)) {
+ bcm63xx_prepend_printk_on_checkfail(bs,
+					"rx/tx_rx transfer not supported when it is not the last one!\n");
+ return false;
+ }
+ }
+
+ if (list_is_last(&t->transfer_list, &msg->transfers)) {
+ memcpy(t_prepend, t, sizeof(struct spi_transfer));
+
+ if (tx_only && t->tx_nbits == SPI_NBITS_SINGLE) {
+ /*
+ * if the last one is also a single bit tx only transfer, merge
+ * all of them into one single tx transfer
+ */
+ t_prepend->len = bs->prepend_cnt;
+ t_prepend->tx_buf = bs->prepend_buf;
+ bs->prepend_cnt = 0;
+ } else {
+ /*
+				 * if the last one is not a tx only transfer, or is a dual tx
+				 * xfer, all the previous transfers are sent as prepend bytes,
+				 * so make sure they do not exceed the max prepend len
+ */
+ if (bs->prepend_cnt > HSSPI_MAX_PREPEND_LEN) {
+ bcm63xx_prepend_printk_on_checkfail(bs,
+ "exceed max prepend len, abort prepending transfers!\n");
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
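
The comment in bcm63xx_prepare_prepend_transfer() spells out when a message qualifies for prepend mode. A minimal sketch of such a message from a hypothetical client driver (opcode, address bytes and lengths are made up): one short single-bit write followed by a final read, with no delays or cs_change in between.

#include <linux/spi/spi.h>

/* Hypothetical client code, not part of the patch. Buffers should be
 * DMA-safe in real drivers; stack buffers are used here only to keep
 * the sketch short. */
static int example_prependable_read(struct spi_device *spi, u8 opcode,
				    void *rxbuf, size_t rxlen)
{
	u8 cmd[4] = { opcode, 0x00, 0x01, 0x00 };	/* opcode + 3 addr bytes */
	struct spi_transfer xfers[] = {
		{ .tx_buf = cmd, .len = sizeof(cmd) },	/* becomes prepend bytes */
		{ .rx_buf = rxbuf, .len = rxlen },	/* final rx transfer */
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
	return spi_sync(spi, &msg);
}
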
+static int bcm63xx_hsspi_do_prepend_txrx(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+ unsigned int chip_select = spi->chip_select;
+ u16 opcode = 0, val;
+ const u8 *tx = t->tx_buf;
+ u8 *rx = t->rx_buf;
+ u32 reg = 0;
+
+ /*
+	 * This shouldn't happen as we set the max_message_size in the probe,
+	 * but check it again in case some driver does not honor the max size.
+ */
+ if (t->len + bs->prepend_cnt > (HSSPI_BUFFER_LEN - HSSPI_OPCODE_LEN)) {
+ dev_warn(&bs->pdev->dev,
+			 "Prepend message larger than fifo size, len %d prepend %d\n",
+ t->len, bs->prepend_cnt);
+ return -EINVAL;
+ }
+
+ bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
+
+ if (tx && rx)
+ opcode = HSSPI_OP_READ_WRITE;
+ else if (tx)
+ opcode = HSSPI_OP_WRITE;
+ else if (rx)
+ opcode = HSSPI_OP_READ;
+
+ if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) ||
+ (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL)) {
+ opcode |= HSSPI_OP_MULTIBIT;
+
+ if (t->rx_nbits == SPI_NBITS_DUAL) {
+ reg |= 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT;
+ reg |= bs->prepend_cnt << MODE_CTRL_MULTIDATA_RD_STRT_SHIFT;
+ }
+ if (t->tx_nbits == SPI_NBITS_DUAL) {
+ reg |= 1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT;
+ reg |= bs->prepend_cnt << MODE_CTRL_MULTIDATA_WR_STRT_SHIFT;
+ }
+ }
+
+ reg |= bs->prepend_cnt << MODE_CTRL_PREPENDBYTE_CNT_SHIFT;
+ __raw_writel(reg | 0xff,
+ bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
+
+ reinit_completion(&bs->done);
+ if (bs->prepend_cnt)
+ memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN, bs->prepend_buf,
+ bs->prepend_cnt);
+ if (tx)
+ memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN + bs->prepend_cnt, tx,
+ t->len);
+
+ *(__be16 *)(&val) = cpu_to_be16(opcode | t->len);
+ __raw_writew(val, bs->fifo);
+ /* enable interrupt */
+ if (bs->wait_mode == HSSPI_WAIT_MODE_INTR)
+ __raw_writel(HSSPI_PINGx_CMD_DONE(0), bs->regs + HSSPI_INT_MASK_REG);
+
+ /* start the transfer */
+ reg = chip_select << PINGPONG_CMD_SS_SHIFT |
+ chip_select << PINGPONG_CMD_PROFILE_SHIFT |
+ PINGPONG_COMMAND_START_NOW;
+ __raw_writel(reg, bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
+
+ if (bcm63xx_hsspi_wait_cmd(bs))
+ return -ETIMEDOUT;
+
+ if (rx)
+ memcpy_fromio(rx, bs->fifo, t->len);
+
+ return 0;
+}
+
static void bcm63xx_hsspi_set_cs(struct bcm63xx_hsspi *bs, unsigned int cs,
bool active)
{
@@ -158,14 +469,16 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
{
struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
unsigned int chip_select = spi->chip_select;
- u16 opcode = 0;
+ u16 opcode = 0, val;
int pending = t->len;
int step_size = HSSPI_BUFFER_LEN;
const u8 *tx = t->tx_buf;
u8 *rx = t->rx_buf;
+ u32 reg = 0;
bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
- bcm63xx_hsspi_set_cs(bs, spi->chip_select, true);
+ if (!t->cs_off)
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, true);
if (tx && rx)
opcode = HSSPI_OP_READ_WRITE;
@@ -178,11 +491,16 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
step_size -= HSSPI_OPCODE_LEN;
if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) ||
- (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL))
+ (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL)) {
opcode |= HSSPI_OP_MULTIBIT;
- __raw_writel(1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT |
- 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT | 0xff,
+ if (t->rx_nbits == SPI_NBITS_DUAL)
+ reg |= 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT;
+ if (t->tx_nbits == SPI_NBITS_DUAL)
+ reg |= 1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT;
+ }
+
+ __raw_writel(reg | 0xff,
bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
while (pending > 0) {
@@ -194,22 +512,21 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
tx += curr_step;
}
- __raw_writew(opcode | curr_step, bs->fifo);
+ *(__be16 *)(&val) = cpu_to_be16(opcode | curr_step);
+ __raw_writew(val, bs->fifo);
/* enable interrupt */
- __raw_writel(HSSPI_PINGx_CMD_DONE(0),
- bs->regs + HSSPI_INT_MASK_REG);
+ if (bs->wait_mode == HSSPI_WAIT_MODE_INTR)
+ __raw_writel(HSSPI_PINGx_CMD_DONE(0),
+ bs->regs + HSSPI_INT_MASK_REG);
- /* start the transfer */
- __raw_writel(!chip_select << PINGPONG_CMD_SS_SHIFT |
- chip_select << PINGPONG_CMD_PROFILE_SHIFT |
- PINGPONG_COMMAND_START_NOW,
- bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
+ reg = !chip_select << PINGPONG_CMD_SS_SHIFT |
+ chip_select << PINGPONG_CMD_PROFILE_SHIFT |
+ PINGPONG_COMMAND_START_NOW;
+ __raw_writel(reg, bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
- if (wait_for_completion_timeout(&bs->done, HZ) == 0) {
- dev_err(&bs->pdev->dev, "transfer timed out!\n");
+ if (bcm63xx_hsspi_wait_cmd(bs))
return -ETIMEDOUT;
- }
if (rx) {
memcpy_fromio(rx, bs->fifo, curr_step);
@@ -259,17 +576,17 @@ static int bcm63xx_hsspi_setup(struct spi_device *spi)
return 0;
}
-static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
+static int bcm63xx_hsspi_do_dummy_cs_txrx(struct spi_device *spi,
struct spi_message *msg)
{
- struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
- struct spi_transfer *t;
- struct spi_device *spi = msg->spi;
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
int status = -EINVAL;
int dummy_cs;
- u32 reg;
+ bool keep_cs = false;
+ struct spi_transfer *t;
- /* This controller does not support keeping CS active during idle.
+ /*
+ * This controller does not support keeping CS active during idle.
* To work around this, we use the following ugly hack:
*
* a. Invert the target chip select's polarity so it will be active.
@@ -287,6 +604,21 @@ static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
bcm63xx_hsspi_set_cs(bs, dummy_cs, true);
list_for_each_entry(t, &msg->transfers, transfer_list) {
+ /*
+		 * We are here for one of the reasons below:
+		 * a. Message is not prependable and we are in the default auto xfer
+		 *    mode, so we fall back to dummy cs mode at the maximum 25MHz safe
+		 *    clock rate.
+		 * b. User explicitly set the dummy cs mode.
+ */
+ if (bs->xfer_mode == HSSPI_XFER_MODE_AUTO) {
+ if (t->speed_hz > HSSPI_MAX_SYNC_CLOCK) {
+ t->speed_hz = HSSPI_MAX_SYNC_CLOCK;
+ dev_warn_once(&bs->pdev->dev,
+					"Forcing dummy cs mode, reducing the speed to %dHz",
+ t->speed_hz);
+ }
+ }
+
status = bcm63xx_hsspi_do_txrx(spi, t);
if (status)
break;
@@ -295,23 +627,85 @@ static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
spi_transfer_delay_exec(t);
- if (t->cs_change)
- bcm63xx_hsspi_set_cs(bs, spi->chip_select, false);
+ /* use existing cs change logic from spi_transfer_one_message */
+ if (t->cs_change) {
+ if (list_is_last(&t->transfer_list, &msg->transfers)) {
+ keep_cs = true;
+ } else {
+ if (!t->cs_off)
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, false);
+
+ spi_transfer_cs_change_delay_exec(msg, t);
+
+ if (!list_next_entry(t, transfer_list)->cs_off)
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, true);
+ }
+ } else if (!list_is_last(&t->transfer_list, &msg->transfers) &&
+ t->cs_off != list_next_entry(t, transfer_list)->cs_off) {
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, t->cs_off);
+ }
}
- mutex_lock(&bs->bus_mutex);
- reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
- reg &= ~GLOBAL_CTRL_CS_POLARITY_MASK;
- reg |= bs->cs_polarity;
- __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
- mutex_unlock(&bs->bus_mutex);
+ bcm63xx_hsspi_set_cs(bs, dummy_cs, false);
+ if (status || !keep_cs)
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, false);
+
+ return status;
+}
+static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+ int status = -EINVAL;
+ bool prependable = false;
+ struct spi_transfer t_prepend;
+
+ mutex_lock(&bs->msg_mutex);
+
+ if (bs->xfer_mode != HSSPI_XFER_MODE_DUMMYCS)
+ prependable = bcm63xx_prepare_prepend_transfer(master, msg, &t_prepend);
+
+ if (prependable) {
+ status = bcm63xx_hsspi_do_prepend_txrx(spi, &t_prepend);
+ msg->actual_length = (t_prepend.len + bs->prepend_cnt);
+ } else {
+ if (bs->xfer_mode == HSSPI_XFER_MODE_PREPEND) {
+ dev_err(&bs->pdev->dev,
+ "User sets prepend mode but msg not prependable! Abort transfer\n");
+ status = -EINVAL;
+ } else
+ status = bcm63xx_hsspi_do_dummy_cs_txrx(spi, msg);
+ }
+
+ mutex_unlock(&bs->msg_mutex);
msg->status = status;
spi_finalize_current_message(master);
return 0;
}
+static bool bcm63xx_hsspi_mem_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ /* Controller doesn't support spi mem dual io mode */
+ if ((op->cmd.opcode == SPINOR_OP_READ_1_2_2) ||
+ (op->cmd.opcode == SPINOR_OP_READ_1_2_2_4B) ||
+ (op->cmd.opcode == SPINOR_OP_READ_1_2_2_DTR) ||
+ (op->cmd.opcode == SPINOR_OP_READ_1_2_2_DTR_4B))
+ return false;
+
+ return true;
+}
+
+static const struct spi_controller_mem_ops bcm63xx_hsspi_mem_ops = {
+ .supports_op = bcm63xx_hsspi_mem_supports_op,
+};
+
static irqreturn_t bcm63xx_hsspi_interrupt(int irq, void *dev_id)
{
struct bcm63xx_hsspi *bs = (struct bcm63xx_hsspi *)dev_id;
@@ -398,10 +792,18 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
bs->regs = regs;
bs->speed_hz = rate;
bs->fifo = (u8 __iomem *)(bs->regs + HSSPI_FIFO_REG(0));
+ bs->wait_mode = HSSPI_WAIT_MODE_POLLING;
+ bs->prepend_buf = devm_kzalloc(dev, HSSPI_BUFFER_LEN, GFP_KERNEL);
+ if (!bs->prepend_buf) {
+ ret = -ENOMEM;
+ goto out_put_master;
+ }
mutex_init(&bs->bus_mutex);
+ mutex_init(&bs->msg_mutex);
init_completion(&bs->done);
+ master->mem_ops = &bcm63xx_hsspi_mem_ops;
master->dev.of_node = dev->of_node;
if (!dev->of_node)
master->bus_num = HSSPI_BUS_NUM;
@@ -415,6 +817,9 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
master->num_chipselect = num_cs;
master->setup = bcm63xx_hsspi_setup;
master->transfer_one_message = bcm63xx_hsspi_transfer_one;
+ master->max_transfer_size = bcm63xx_hsspi_max_message_size;
+ master->max_message_size = bcm63xx_hsspi_max_message_size;
+
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
SPI_RX_DUAL | SPI_TX_DUAL;
master->bits_per_word_mask = SPI_BPW_MASK(8);
@@ -434,21 +839,33 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
__raw_writel(reg | GLOBAL_CTRL_CLK_GATE_SSOFF,
bs->regs + HSSPI_GLOBAL_CTRL_REG);
- ret = devm_request_irq(dev, irq, bcm63xx_hsspi_interrupt, IRQF_SHARED,
- pdev->name, bs);
+ if (irq > 0) {
+ ret = devm_request_irq(dev, irq, bcm63xx_hsspi_interrupt, IRQF_SHARED,
+ pdev->name, bs);
- if (ret)
- goto out_put_master;
+ if (ret)
+ goto out_put_master;
+ }
pm_runtime_enable(&pdev->dev);
+ ret = sysfs_create_group(&pdev->dev.kobj, &bcm63xx_hsspi_group);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't register sysfs group\n");
+ goto out_pm_disable;
+ }
+
/* register and we are done */
ret = devm_spi_register_master(dev, master);
if (ret)
- goto out_pm_disable;
+ goto out_sysgroup_disable;
+
+ dev_info(dev, "Broadcom 63XX High Speed SPI Controller driver");
return 0;
+out_sysgroup_disable:
+ sysfs_remove_group(&pdev->dev.kobj, &bcm63xx_hsspi_group);
out_pm_disable:
pm_runtime_disable(&pdev->dev);
out_put_master:
@@ -470,6 +887,7 @@ static int bcm63xx_hsspi_remove(struct platform_device *pdev)
__raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
clk_disable_unprepare(bs->pll_clk);
clk_disable_unprepare(bs->clk);
+ sysfs_remove_group(&pdev->dev.kobj, &bcm63xx_hsspi_group);
return 0;
}
@@ -516,6 +934,7 @@ static SIMPLE_DEV_PM_OPS(bcm63xx_hsspi_pm_ops, bcm63xx_hsspi_suspend,
static const struct of_device_id bcm63xx_hsspi_of_match[] = {
{ .compatible = "brcm,bcm6328-hsspi", },
+ { .compatible = "brcm,bcmbca-hsspi-v1.0", },
{ },
};
MODULE_DEVICE_TABLE(of, bcm63xx_hsspi_of_match);
diff --git a/drivers/spi/spi-bcmbca-hsspi.c b/drivers/spi/spi-bcmbca-hsspi.c
new file mode 100644
index 000000000000..3f9e6131ad86
--- /dev/null
+++ b/drivers/spi/spi-bcmbca-hsspi.c
@@ -0,0 +1,654 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Broadcom BCMBCA High Speed SPI Controller driver
+ *
+ * Copyright 2000-2010 Broadcom Corporation
+ * Copyright 2012-2013 Jonas Gorski <jogo@openwrt.org>
+ * Copyright 2019-2022 Broadcom Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/pm_runtime.h>
+
+#define HSSPI_GLOBAL_CTRL_REG 0x0
+#define GLOBAL_CTRL_CS_POLARITY_SHIFT 0
+#define GLOBAL_CTRL_CS_POLARITY_MASK 0x000000ff
+#define GLOBAL_CTRL_PLL_CLK_CTRL_SHIFT 8
+#define GLOBAL_CTRL_PLL_CLK_CTRL_MASK 0x0000ff00
+#define GLOBAL_CTRL_CLK_GATE_SSOFF BIT(16)
+#define GLOBAL_CTRL_CLK_POLARITY BIT(17)
+#define GLOBAL_CTRL_MOSI_IDLE BIT(18)
+
+#define HSSPI_GLOBAL_EXT_TRIGGER_REG 0x4
+
+#define HSSPI_INT_STATUS_REG 0x8
+#define HSSPI_INT_STATUS_MASKED_REG 0xc
+#define HSSPI_INT_MASK_REG 0x10
+
+#define HSSPI_PINGx_CMD_DONE(i) BIT((i * 8) + 0)
+#define HSSPI_PINGx_RX_OVER(i) BIT((i * 8) + 1)
+#define HSSPI_PINGx_TX_UNDER(i) BIT((i * 8) + 2)
+#define HSSPI_PINGx_POLL_TIMEOUT(i) BIT((i * 8) + 3)
+#define HSSPI_PINGx_CTRL_INVAL(i) BIT((i * 8) + 4)
+
+#define HSSPI_INT_CLEAR_ALL 0xff001f1f
+
+#define HSSPI_PINGPONG_COMMAND_REG(x) (0x80 + (x) * 0x40)
+#define PINGPONG_CMD_COMMAND_MASK 0xf
+#define PINGPONG_COMMAND_NOOP 0
+#define PINGPONG_COMMAND_START_NOW 1
+#define PINGPONG_COMMAND_START_TRIGGER 2
+#define PINGPONG_COMMAND_HALT 3
+#define PINGPONG_COMMAND_FLUSH 4
+#define PINGPONG_CMD_PROFILE_SHIFT 8
+#define PINGPONG_CMD_SS_SHIFT 12
+
+#define HSSPI_PINGPONG_STATUS_REG(x) (0x84 + (x) * 0x40)
+#define HSSPI_PINGPONG_STATUS_SRC_BUSY BIT(1)
+
+#define HSSPI_PROFILE_CLK_CTRL_REG(x) (0x100 + (x) * 0x20)
+#define CLK_CTRL_FREQ_CTRL_MASK 0x0000ffff
+#define CLK_CTRL_SPI_CLK_2X_SEL BIT(14)
+#define CLK_CTRL_ACCUM_RST_ON_LOOP BIT(15)
+#define CLK_CTRL_CLK_POLARITY BIT(16)
+
+#define HSSPI_PROFILE_SIGNAL_CTRL_REG(x) (0x104 + (x) * 0x20)
+#define SIGNAL_CTRL_LATCH_RISING BIT(12)
+#define SIGNAL_CTRL_LAUNCH_RISING BIT(13)
+#define SIGNAL_CTRL_ASYNC_INPUT_PATH BIT(16)
+
+#define HSSPI_PROFILE_MODE_CTRL_REG(x) (0x108 + (x) * 0x20)
+#define MODE_CTRL_MULTIDATA_RD_STRT_SHIFT 8
+#define MODE_CTRL_MULTIDATA_WR_STRT_SHIFT 12
+#define MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT 16
+#define MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT 18
+#define MODE_CTRL_MODE_3WIRE BIT(20)
+#define MODE_CTRL_PREPENDBYTE_CNT_SHIFT 24
+
+#define HSSPI_FIFO_REG(x) (0x200 + (x) * 0x200)
+
+#define HSSPI_OP_MULTIBIT BIT(11)
+#define HSSPI_OP_CODE_SHIFT 13
+#define HSSPI_OP_SLEEP (0 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_READ_WRITE (1 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_WRITE (2 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_READ (3 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_SETIRQ (4 << HSSPI_OP_CODE_SHIFT)
+
+#define HSSPI_BUFFER_LEN 512
+#define HSSPI_OPCODE_LEN 2
+
+#define HSSPI_MAX_PREPEND_LEN 15
+
+#define HSSPI_MAX_SYNC_CLOCK 30000000
+
+#define HSSPI_SPI_MAX_CS 8
+#define HSSPI_BUS_NUM 1 /* 0 is legacy SPI */
+#define HSSPI_POLL_STATUS_TIMEOUT_MS 100
+
+#define HSSPI_WAIT_MODE_POLLING 0
+#define HSSPI_WAIT_MODE_INTR 1
+#define HSSPI_WAIT_MODE_MAX HSSPI_WAIT_MODE_INTR
+
+#define SPIM_CTRL_CS_OVERRIDE_SEL_SHIFT 0
+#define SPIM_CTRL_CS_OVERRIDE_SEL_MASK 0xff
+#define SPIM_CTRL_CS_OVERRIDE_VAL_SHIFT 8
+#define SPIM_CTRL_CS_OVERRIDE_VAL_MASK 0xff
+
+struct bcmbca_hsspi {
+ struct completion done;
+ struct mutex bus_mutex;
+ struct mutex msg_mutex;
+ struct platform_device *pdev;
+ struct clk *clk;
+ struct clk *pll_clk;
+ void __iomem *regs;
+ void __iomem *spim_ctrl;
+ u8 __iomem *fifo;
+ u32 speed_hz;
+ u8 cs_polarity;
+ u32 wait_mode;
+};
+
+static ssize_t wait_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct bcmbca_hsspi *bs = spi_master_get_devdata(ctrl);
+
+ return sprintf(buf, "%d\n", bs->wait_mode);
+}
+
+static ssize_t wait_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct bcmbca_hsspi *bs = spi_master_get_devdata(ctrl);
+ u32 val;
+
+ if (kstrtou32(buf, 10, &val))
+ return -EINVAL;
+
+ if (val > HSSPI_WAIT_MODE_MAX) {
+ dev_warn(dev, "invalid wait mode %u\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&bs->msg_mutex);
+ bs->wait_mode = val;
+ /* clear interrupt status to avoid spurious int on next transfer */
+ if (val == HSSPI_WAIT_MODE_INTR)
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+ mutex_unlock(&bs->msg_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(wait_mode);
+
+static struct attribute *bcmbca_hsspi_attrs[] = {
+ &dev_attr_wait_mode.attr,
+ NULL,
+};
+
+static const struct attribute_group bcmbca_hsspi_group = {
+ .attrs = bcmbca_hsspi_attrs,
+};
+
+static void bcmbca_hsspi_set_cs(struct bcmbca_hsspi *bs, unsigned int cs,
+ bool active)
+{
+ u32 reg;
+
+	/* No cs override needed for SS7 internal cs on pcm based voice dev */
+ if (cs == 7)
+ return;
+
+ mutex_lock(&bs->bus_mutex);
+
+ reg = __raw_readl(bs->spim_ctrl);
+ if (active)
+ reg |= BIT(cs + SPIM_CTRL_CS_OVERRIDE_SEL_SHIFT);
+ else
+ reg &= ~BIT(cs + SPIM_CTRL_CS_OVERRIDE_SEL_SHIFT);
+
+ __raw_writel(reg, bs->spim_ctrl);
+
+ mutex_unlock(&bs->bus_mutex);
+}
+
+static void bcmbca_hsspi_set_clk(struct bcmbca_hsspi *bs,
+ struct spi_device *spi, int hz)
+{
+ unsigned int profile = spi->chip_select;
+ u32 reg;
+
+ reg = DIV_ROUND_UP(2048, DIV_ROUND_UP(bs->speed_hz, hz));
+ __raw_writel(CLK_CTRL_ACCUM_RST_ON_LOOP | reg,
+ bs->regs + HSSPI_PROFILE_CLK_CTRL_REG(profile));
+
+ reg = __raw_readl(bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
+ if (hz > HSSPI_MAX_SYNC_CLOCK)
+ reg |= SIGNAL_CTRL_ASYNC_INPUT_PATH;
+ else
+ reg &= ~SIGNAL_CTRL_ASYNC_INPUT_PATH;
+ __raw_writel(reg, bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
+
+ mutex_lock(&bs->bus_mutex);
+ /* setup clock polarity */
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ reg &= ~GLOBAL_CTRL_CLK_POLARITY;
+ if (spi->mode & SPI_CPOL)
+ reg |= GLOBAL_CTRL_CLK_POLARITY;
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ mutex_unlock(&bs->bus_mutex);
+}
+
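
The CLK_CTRL value programmed in bcmbca_hsspi_set_clk() above is 2048 divided by the rounded-up ratio of the HSSPI reference clock to the requested device clock. A worked sketch with made-up rates, not part of the patch:

#include <linux/kernel.h>

/*
 * Illustrative only: with a 400 MHz HSSPI reference clock and a 20 MHz
 * device clock, DIV_ROUND_UP(400000000, 20000000) = 20 and
 * DIV_ROUND_UP(2048, 20) = 103, the value written to the FREQ_CTRL field.
 */
static u32 example_freq_ctrl(u32 ref_hz, u32 target_hz)
{
	return DIV_ROUND_UP(2048, DIV_ROUND_UP(ref_hz, target_hz));
}
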
+static int bcmbca_hsspi_wait_cmd(struct bcmbca_hsspi *bs, unsigned int cs)
+{
+ unsigned long limit;
+ u32 reg = 0;
+ int rc = 0;
+
+ if (bs->wait_mode == HSSPI_WAIT_MODE_INTR) {
+ if (wait_for_completion_timeout(&bs->done, HZ) == 0)
+ rc = 1;
+ } else {
+ limit = jiffies + msecs_to_jiffies(HSSPI_POLL_STATUS_TIMEOUT_MS);
+
+ while (!time_after(jiffies, limit)) {
+ reg = __raw_readl(bs->regs + HSSPI_PINGPONG_STATUS_REG(0));
+ if (reg & HSSPI_PINGPONG_STATUS_SRC_BUSY)
+ cpu_relax();
+ else
+ break;
+ }
+ if (reg & HSSPI_PINGPONG_STATUS_SRC_BUSY)
+ rc = 1;
+ }
+
+ if (rc)
+ dev_err(&bs->pdev->dev, "transfer timed out!\n");
+
+ return rc;
+}
+
+static int bcmbca_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t,
+ struct spi_message *msg)
+{
+ struct bcmbca_hsspi *bs = spi_master_get_devdata(spi->master);
+ unsigned int chip_select = spi->chip_select;
+ u16 opcode = 0, val;
+ int pending = t->len;
+ int step_size = HSSPI_BUFFER_LEN;
+ const u8 *tx = t->tx_buf;
+ u8 *rx = t->rx_buf;
+ u32 reg = 0, cs_act = 0;
+
+ bcmbca_hsspi_set_clk(bs, spi, t->speed_hz);
+
+ if (tx && rx)
+ opcode = HSSPI_OP_READ_WRITE;
+ else if (tx)
+ opcode = HSSPI_OP_WRITE;
+ else if (rx)
+ opcode = HSSPI_OP_READ;
+
+ if (opcode != HSSPI_OP_READ)
+ step_size -= HSSPI_OPCODE_LEN;
+
+ if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) ||
+ (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL)) {
+ opcode |= HSSPI_OP_MULTIBIT;
+
+ if (t->rx_nbits == SPI_NBITS_DUAL)
+ reg |= 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT;
+ if (t->tx_nbits == SPI_NBITS_DUAL)
+ reg |= 1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT;
+ }
+
+ __raw_writel(reg | 0xff,
+ bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
+
+ while (pending > 0) {
+ int curr_step = min_t(int, step_size, pending);
+
+ reinit_completion(&bs->done);
+ if (tx) {
+ memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN, tx, curr_step);
+ tx += curr_step;
+ }
+
+ *(__be16 *)(&val) = cpu_to_be16(opcode | curr_step);
+ __raw_writew(val, bs->fifo);
+
+ /* enable interrupt */
+ if (bs->wait_mode == HSSPI_WAIT_MODE_INTR)
+ __raw_writel(HSSPI_PINGx_CMD_DONE(0),
+ bs->regs + HSSPI_INT_MASK_REG);
+
+ if (!cs_act) {
+			/* assert the cs signal as close as possible to the cmd start */
+ bcmbca_hsspi_set_cs(bs, chip_select, true);
+ cs_act = 1;
+ }
+
+ reg = chip_select << PINGPONG_CMD_SS_SHIFT |
+ chip_select << PINGPONG_CMD_PROFILE_SHIFT |
+ PINGPONG_COMMAND_START_NOW;
+ __raw_writel(reg, bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
+
+ if (bcmbca_hsspi_wait_cmd(bs, spi->chip_select))
+ return -ETIMEDOUT;
+
+ pending -= curr_step;
+
+ if (rx) {
+ memcpy_fromio(rx, bs->fifo, curr_step);
+ rx += curr_step;
+ }
+ }
+
+ return 0;
+}
+
+static int bcmbca_hsspi_setup(struct spi_device *spi)
+{
+ struct bcmbca_hsspi *bs = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = __raw_readl(bs->regs +
+ HSSPI_PROFILE_SIGNAL_CTRL_REG(spi->chip_select));
+ reg &= ~(SIGNAL_CTRL_LAUNCH_RISING | SIGNAL_CTRL_LATCH_RISING);
+ if (spi->mode & SPI_CPHA)
+ reg |= SIGNAL_CTRL_LAUNCH_RISING;
+ else
+ reg |= SIGNAL_CTRL_LATCH_RISING;
+ __raw_writel(reg, bs->regs +
+ HSSPI_PROFILE_SIGNAL_CTRL_REG(spi->chip_select));
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ if (spi->mode & SPI_CS_HIGH)
+ reg |= BIT(spi->chip_select);
+ else
+ reg &= ~BIT(spi->chip_select);
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ if (spi->mode & SPI_CS_HIGH)
+ bs->cs_polarity |= BIT(spi->chip_select);
+ else
+ bs->cs_polarity &= ~BIT(spi->chip_select);
+
+ reg = __raw_readl(bs->spim_ctrl);
+ reg &= ~BIT(spi->chip_select + SPIM_CTRL_CS_OVERRIDE_VAL_SHIFT);
+ if (spi->mode & SPI_CS_HIGH)
+ reg |= BIT(spi->chip_select + SPIM_CTRL_CS_OVERRIDE_VAL_SHIFT);
+ __raw_writel(reg, bs->spim_ctrl);
+
+ mutex_unlock(&bs->bus_mutex);
+
+ return 0;
+}
+
+static int bcmbca_hsspi_transfer_one(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct bcmbca_hsspi *bs = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ struct spi_device *spi = msg->spi;
+ int status = -EINVAL;
+ bool keep_cs = false;
+
+ mutex_lock(&bs->msg_mutex);
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ status = bcmbca_hsspi_do_txrx(spi, t, msg);
+ if (status)
+ break;
+
+ spi_transfer_delay_exec(t);
+
+ if (t->cs_change) {
+ if (list_is_last(&t->transfer_list, &msg->transfers)) {
+ keep_cs = true;
+ } else {
+ if (!t->cs_off)
+ bcmbca_hsspi_set_cs(bs, spi->chip_select, false);
+
+ spi_transfer_cs_change_delay_exec(msg, t);
+
+ if (!list_next_entry(t, transfer_list)->cs_off)
+ bcmbca_hsspi_set_cs(bs, spi->chip_select, true);
+ }
+ } else if (!list_is_last(&t->transfer_list, &msg->transfers) &&
+ t->cs_off != list_next_entry(t, transfer_list)->cs_off) {
+ bcmbca_hsspi_set_cs(bs, spi->chip_select, t->cs_off);
+ }
+
+ msg->actual_length += t->len;
+ }
+
+ mutex_unlock(&bs->msg_mutex);
+
+ if (status || !keep_cs)
+ bcmbca_hsspi_set_cs(bs, spi->chip_select, false);
+
+ msg->status = status;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static irqreturn_t bcmbca_hsspi_interrupt(int irq, void *dev_id)
+{
+ struct bcmbca_hsspi *bs = (struct bcmbca_hsspi *)dev_id;
+
+ if (__raw_readl(bs->regs + HSSPI_INT_STATUS_MASKED_REG) == 0)
+ return IRQ_NONE;
+
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+
+ complete(&bs->done);
+
+ return IRQ_HANDLED;
+}
+
+static int bcmbca_hsspi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct bcmbca_hsspi *bs;
+ struct resource *res_mem;
+ void __iomem *spim_ctrl;
+ void __iomem *regs;
+ struct device *dev = &pdev->dev;
+ struct clk *clk, *pll_clk = NULL;
+ int irq, ret;
+ u32 reg, rate, num_cs = HSSPI_SPI_MAX_CS;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hsspi");
+ if (!res_mem)
+ return -EINVAL;
+ regs = devm_ioremap_resource(dev, res_mem);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "spim-ctrl");
+ if (!res_mem)
+ return -EINVAL;
+ spim_ctrl = devm_ioremap_resource(dev, res_mem);
+ if (IS_ERR(spim_ctrl))
+ return PTR_ERR(spim_ctrl);
+
+ clk = devm_clk_get(dev, "hsspi");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ rate = clk_get_rate(clk);
+ if (!rate) {
+ pll_clk = devm_clk_get(dev, "pll");
+
+ if (IS_ERR(pll_clk)) {
+ ret = PTR_ERR(pll_clk);
+ goto out_disable_clk;
+ }
+
+ ret = clk_prepare_enable(pll_clk);
+ if (ret)
+ goto out_disable_clk;
+
+ rate = clk_get_rate(pll_clk);
+ if (!rate) {
+ ret = -EINVAL;
+ goto out_disable_pll_clk;
+ }
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ if (!master) {
+ ret = -ENOMEM;
+ goto out_disable_pll_clk;
+ }
+
+ bs = spi_master_get_devdata(master);
+ bs->pdev = pdev;
+ bs->clk = clk;
+ bs->pll_clk = pll_clk;
+ bs->regs = regs;
+ bs->spim_ctrl = spim_ctrl;
+ bs->speed_hz = rate;
+ bs->fifo = (u8 __iomem *) (bs->regs + HSSPI_FIFO_REG(0));
+ bs->wait_mode = HSSPI_WAIT_MODE_POLLING;
+
+ mutex_init(&bs->bus_mutex);
+ mutex_init(&bs->msg_mutex);
+ init_completion(&bs->done);
+
+ master->dev.of_node = dev->of_node;
+ if (!dev->of_node)
+ master->bus_num = HSSPI_BUS_NUM;
+
+ of_property_read_u32(dev->of_node, "num-cs", &num_cs);
+ if (num_cs > 8) {
+ dev_warn(dev, "unsupported number of cs (%i), reducing to 8\n",
+ num_cs);
+ num_cs = HSSPI_SPI_MAX_CS;
+ }
+ master->num_chipselect = num_cs;
+ master->setup = bcmbca_hsspi_setup;
+ master->transfer_one_message = bcmbca_hsspi_transfer_one;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
+ SPI_RX_DUAL | SPI_TX_DUAL;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->auto_runtime_pm = true;
+
+ platform_set_drvdata(pdev, master);
+
+ /* Initialize the hardware */
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+
+ /* clean up any pending interrupts */
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+
+ /* read out default CS polarities */
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ bs->cs_polarity = reg & GLOBAL_CTRL_CS_POLARITY_MASK;
+ __raw_writel(reg | GLOBAL_CTRL_CLK_GATE_SSOFF,
+ bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ if (irq > 0) {
+ ret = devm_request_irq(dev, irq, bcmbca_hsspi_interrupt, IRQF_SHARED,
+ pdev->name, bs);
+ if (ret)
+ goto out_put_master;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &bcmbca_hsspi_group);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't register sysfs group\n");
+ goto out_pm_disable;
+ }
+
+ /* register and we are done */
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto out_sysgroup_disable;
+
+ dev_info(dev, "Broadcom BCMBCA High Speed SPI Controller driver");
+
+ return 0;
+
+out_sysgroup_disable:
+ sysfs_remove_group(&pdev->dev.kobj, &bcmbca_hsspi_group);
+out_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+out_put_master:
+ spi_master_put(master);
+out_disable_pll_clk:
+ clk_disable_unprepare(pll_clk);
+out_disable_clk:
+ clk_disable_unprepare(clk);
+ return ret;
+}
+
+static int bcmbca_hsspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bcmbca_hsspi *bs = spi_master_get_devdata(master);
+
+ /* reset the hardware and block queue progress */
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+ clk_disable_unprepare(bs->pll_clk);
+ clk_disable_unprepare(bs->clk);
+ sysfs_remove_group(&pdev->dev.kobj, &bcmbca_hsspi_group);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bcmbca_hsspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcmbca_hsspi *bs = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+ clk_disable_unprepare(bs->pll_clk);
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+static int bcmbca_hsspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcmbca_hsspi *bs = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ return ret;
+
+ if (bs->pll_clk) {
+ ret = clk_prepare_enable(bs->pll_clk);
+ if (ret) {
+ clk_disable_unprepare(bs->clk);
+ return ret;
+ }
+ }
+
+ spi_master_resume(master);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bcmbca_hsspi_pm_ops, bcmbca_hsspi_suspend,
+ bcmbca_hsspi_resume);
+
+static const struct of_device_id bcmbca_hsspi_of_match[] = {
+ { .compatible = "brcm,bcmbca-hsspi-v1.1", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, bcmbca_hsspi_of_match);
+
+static struct platform_driver bcmbca_hsspi_driver = {
+ .driver = {
+ .name = "bcmbca-hsspi",
+ .pm = &bcmbca_hsspi_pm_ops,
+ .of_match_table = bcmbca_hsspi_of_match,
+ },
+ .probe = bcmbca_hsspi_probe,
+ .remove = bcmbca_hsspi_remove,
+};
+
+module_platform_driver(bcmbca_hsspi_driver);
+
+MODULE_ALIAS("platform:bcmbca_hsspi");
+MODULE_DESCRIPTION("Broadcom BCMBCA High Speed SPI Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 676313e1bdad..2954c06a7f57 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -84,6 +84,7 @@ struct cqspi_st {
u32 trigger_address;
u32 wr_delay;
bool use_direct_mode;
+ bool use_direct_mode_wr;
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
bool use_dma_read;
u32 pd_dev_id;
@@ -531,6 +532,17 @@ static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
/* 0 means 1 byte. */
reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
+
+ /* setup ADDR BIT field */
+ if (op->addr.nbytes) {
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+ reg |= ((op->addr.nbytes - 1) &
+ CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
+
+ writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
+ }
+
status = cqspi_exec_flash_cmd(cqspi, reg);
if (status)
return status;
@@ -549,6 +561,9 @@ static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
memcpy(rxbuf, &reg, read_len);
}
+ /* Reset CMD_CTRL Reg once command read completes */
+ writel(0, reg_base + CQSPI_REG_CMDCTRL);
+
return 0;
}
@@ -613,7 +628,12 @@ static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
}
}
- return cqspi_exec_flash_cmd(cqspi, reg);
+ ret = cqspi_exec_flash_cmd(cqspi, reg);
+
+ /* Reset CMD_CTRL Reg once command write completes */
+ writel(0, reg_base + CQSPI_REG_CMDCTRL);
+
+ return ret;
}
static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
@@ -937,6 +957,12 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+ /*
+	 * DAC mode requires auto polling as the flash needs to be polled
+	 * for write completion in case of a bubble in the SPI transaction
+	 * due to a slow CPU/DMA master.
+ */
+ cqspi->use_direct_mode_wr = false;
}
reg = readl(reg_base + CQSPI_REG_SIZE);
@@ -1222,7 +1248,7 @@ static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
* data.
*/
if (!op->cmd.dtr && cqspi->use_direct_mode &&
- ((to + len) <= cqspi->ahb_size)) {
+ cqspi->use_direct_mode_wr && ((to + len) <= cqspi->ahb_size)) {
memcpy_toio(cqspi->ahb_base + to, buf, len);
return cqspi_wait_idle(cqspi);
}
@@ -1333,7 +1359,13 @@ static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
cqspi_configure(f_pdata, mem->spi->max_speed_hz);
if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
- if (!op->addr.nbytes)
+ /*
+		 * Performing reads in DAC mode forces a minimum read of 4 bytes,
+		 * which is unsupported on some flash devices during register
+		 * reads, so prefer STIG mode for such small reads.
+ */
+ if (!op->addr.nbytes ||
+ op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX)
return cqspi_command_read(f_pdata, op);
return cqspi_read(f_pdata, op);
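
With the check above, small reads that do carry an address now also take the STIG (cqspi_command_read) path instead of a DAC read with its 4-byte minimum. A hedged sketch of such an op from a hypothetical spi-mem caller, not part of the patch; the opcode and address are illustrative only:

#include <linux/spi/spi-mem.h>

/* Hypothetical caller: a 1-byte addressed register read whose data
 * length is well under CQSPI_STIG_DATA_LEN_MAX. */
static int example_small_reg_read(struct spi_mem *mem, u8 *val)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x65, 1),
			   SPI_MEM_OP_ADDR(3, 0x800004, 1),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(1, val, 1));

	return spi_mem_exec_op(mem, &op);
}
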
@@ -1692,8 +1724,10 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->master_ref_clk_hz);
if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
master->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
- if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE))
+ if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) {
cqspi->use_direct_mode = true;
+ cqspi->use_direct_mode_wr = true;
+ }
if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA)
cqspi->use_dma_read = true;
if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 4e83cc5b445d..e1b2e0b65779 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -87,6 +87,8 @@ struct spi_geni_master {
struct completion cs_done;
struct completion cancel_done;
struct completion abort_done;
+ struct completion tx_reset_done;
+ struct completion rx_reset_done;
unsigned int oversampling;
spinlock_t lock;
int irq;
@@ -95,6 +97,8 @@ struct spi_geni_master {
struct dma_chan *tx;
struct dma_chan *rx;
int cur_xfer_mode;
+ dma_addr_t tx_se_dma;
+ dma_addr_t rx_se_dma;
};
static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -129,23 +133,27 @@ static int get_spi_clk_cfg(unsigned int speed_hz,
return ret;
}
-static void handle_fifo_timeout(struct spi_master *spi,
+static void handle_se_timeout(struct spi_master *spi,
struct spi_message *msg)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
unsigned long time_left;
struct geni_se *se = &mas->se;
+ const struct spi_transfer *xfer;
spin_lock_irq(&mas->lock);
reinit_completion(&mas->cancel_done);
- writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ if (mas->cur_xfer_mode == GENI_SE_FIFO)
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+
+ xfer = mas->cur_xfer;
mas->cur_xfer = NULL;
geni_se_cancel_m_cmd(se);
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->cancel_done, HZ);
if (time_left)
- return;
+ goto unmap_if_dma;
spin_lock_irq(&mas->lock);
reinit_completion(&mas->abort_done);
@@ -162,6 +170,39 @@ static void handle_fifo_timeout(struct spi_master *spi,
*/
mas->abort_failed = true;
}
+
+unmap_if_dma:
+ if (mas->cur_xfer_mode == GENI_SE_DMA) {
+ if (xfer) {
+ if (xfer->tx_buf && mas->tx_se_dma) {
+ spin_lock_irq(&mas->lock);
+ reinit_completion(&mas->tx_reset_done);
+ writel(1, se->base + SE_DMA_TX_FSM_RST);
+ spin_unlock_irq(&mas->lock);
+ time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
+ if (!time_left)
+ dev_err(mas->dev, "DMA TX RESET failed\n");
+ geni_se_tx_dma_unprep(se, mas->tx_se_dma, xfer->len);
+ }
+ if (xfer->rx_buf && mas->rx_se_dma) {
+ spin_lock_irq(&mas->lock);
+ reinit_completion(&mas->rx_reset_done);
+ writel(1, se->base + SE_DMA_RX_FSM_RST);
+ spin_unlock_irq(&mas->lock);
+ time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
+ if (!time_left)
+ dev_err(mas->dev, "DMA RX RESET failed\n");
+ geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
+ }
+ } else {
+ /*
+			 * This can happen if a timeout occurred and we had to wait
+			 * for the lock in this function because the isr was holding
+			 * the lock and handling transfer completion at that time.
+ */
+ dev_warn(mas->dev, "Cancel/Abort on completed SPI transfer\n");
+ }
+ }
}
static void handle_gpi_timeout(struct spi_master *spi, struct spi_message *msg)
@@ -178,7 +219,8 @@ static void spi_geni_handle_err(struct spi_master *spi, struct spi_message *msg)
switch (mas->cur_xfer_mode) {
case GENI_SE_FIFO:
- handle_fifo_timeout(spi, msg);
+ case GENI_SE_DMA:
+ handle_se_timeout(spi, msg);
break;
case GENI_GPI_DMA:
handle_gpi_timeout(spi, msg);
@@ -250,6 +292,8 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
}
mas->cs_flag = set_flag;
+ /* set xfer_mode to FIFO to complete cs_done in isr */
+ mas->cur_xfer_mode = GENI_SE_FIFO;
reinit_completion(&mas->cs_done);
if (set_flag)
geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
@@ -260,7 +304,7 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
if (!time_left) {
dev_warn(mas->dev, "Timeout setting chip select\n");
- handle_fifo_timeout(spi, NULL);
+ handle_se_timeout(spi, NULL);
}
exit:
@@ -482,8 +526,12 @@ static bool geni_can_dma(struct spi_controller *ctlr,
{
struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
- /* check if dma is supported */
- return mas->cur_xfer_mode != GENI_SE_FIFO;
+ /*
+ * Return true if transfer needs to be mapped prior to
+ * calling transfer_one which is the case only for GPI_DMA.
+ * For SE_DMA mode, map/unmap is done in geni_se_*x_dma_prep.
+ */
+ return mas->cur_xfer_mode == GENI_GPI_DMA;
}
static int spi_geni_prepare_message(struct spi_master *spi,
@@ -494,6 +542,7 @@ static int spi_geni_prepare_message(struct spi_master *spi,
switch (mas->cur_xfer_mode) {
case GENI_SE_FIFO:
+ case GENI_SE_DMA:
if (spi_geni_is_abort_still_pending(mas))
return -EBUSY;
ret = setup_fifo_params(spi_msg->spi, spi);
@@ -597,7 +646,7 @@ static int spi_geni_init(struct spi_geni_master *mas)
break;
}
/*
- * in case of failure to get dma channel, we can still do the
+ * in case of failure to get gpi dma channel, we can still do the
* FIFO mode, so fallthrough
*/
dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n");
@@ -716,12 +765,12 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
mas->rx_rem_bytes -= rx_bytes;
}
-static void setup_fifo_xfer(struct spi_transfer *xfer,
+static int setup_se_xfer(struct spi_transfer *xfer,
struct spi_geni_master *mas,
u16 mode, struct spi_master *spi)
{
u32 m_cmd = 0;
- u32 len;
+ u32 len, fifo_size;
struct geni_se *se = &mas->se;
int ret;
@@ -748,7 +797,7 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
/* Speed and bits per word can be overridden per transfer */
ret = geni_spi_set_clock_and_bw(mas, xfer->speed_hz);
if (ret)
- return;
+ return ret;
mas->tx_rem_bytes = 0;
mas->rx_rem_bytes = 0;
@@ -772,17 +821,50 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
mas->rx_rem_bytes = xfer->len;
}
+ /* Select transfer mode based on transfer length */
+ fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;
+ mas->cur_xfer_mode = (len <= fifo_size) ? GENI_SE_FIFO : GENI_SE_DMA;
+ geni_se_select_mode(se, mas->cur_xfer_mode);
+
/*
* Lock around right before we start the transfer since our
* interrupt could come in at any time now.
*/
spin_lock_irq(&mas->lock);
geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
- if (m_cmd & SPI_TX_ONLY) {
+
+ if (mas->cur_xfer_mode == GENI_SE_DMA) {
+ if (m_cmd & SPI_RX_ONLY) {
+ ret = geni_se_rx_dma_prep(se, xfer->rx_buf,
+ xfer->len, &mas->rx_se_dma);
+ if (ret) {
+ dev_err(mas->dev, "Failed to setup Rx dma %d\n", ret);
+ mas->rx_se_dma = 0;
+ goto unlock_and_return;
+ }
+ }
+ if (m_cmd & SPI_TX_ONLY) {
+ ret = geni_se_tx_dma_prep(se, (void *)xfer->tx_buf,
+ xfer->len, &mas->tx_se_dma);
+ if (ret) {
+ dev_err(mas->dev, "Failed to setup Tx dma %d\n", ret);
+ mas->tx_se_dma = 0;
+ if (m_cmd & SPI_RX_ONLY) {
+ /* Unmap rx buffer if duplex transfer */
+ geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
+ mas->rx_se_dma = 0;
+ }
+ goto unlock_and_return;
+ }
+ }
+ } else if (m_cmd & SPI_TX_ONLY) {
if (geni_spi_handle_tx(mas))
writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
}
+
+unlock_and_return:
spin_unlock_irq(&mas->lock);
+ return ret;
}
static int spi_geni_transfer_one(struct spi_master *spi,
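
The FIFO-versus-DMA split in setup_se_xfer() compares the transfer length, in SPI words, against how many words of the current size fit in the TX FIFO. A worked sketch with illustrative geometry, not part of the patch: a 16-word FIFO of 32-bit words at 8 bits per word holds 64 words, so an 8-bit transfer of up to 64 bytes stays in GENI_SE_FIFO mode and anything longer switches to GENI_SE_DMA.

/* Illustrative only: FIFO capacity in SPI words of the current word size. */
static u32 example_se_fifo_words(u32 tx_fifo_depth, u32 fifo_width_bits,
				 u32 bits_per_word)
{
	/* e.g. 16 * 32 / 8 = 64 words */
	return tx_fifo_depth * fifo_width_bits / bits_per_word;
}
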
@@ -790,6 +872,7 @@ static int spi_geni_transfer_one(struct spi_master *spi,
struct spi_transfer *xfer)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ int ret;
if (spi_geni_is_abort_still_pending(mas))
return -EBUSY;
@@ -798,9 +881,12 @@ static int spi_geni_transfer_one(struct spi_master *spi,
if (!xfer->len)
return 0;
- if (mas->cur_xfer_mode == GENI_SE_FIFO) {
- setup_fifo_xfer(xfer, mas, slv->mode, spi);
- return 1;
+ if (mas->cur_xfer_mode == GENI_SE_FIFO || mas->cur_xfer_mode == GENI_SE_DMA) {
+ ret = setup_se_xfer(xfer, mas, slv->mode, spi);
+ /* SPI framework expects +ve ret code to wait for transfer complete */
+ if (!ret)
+ ret = 1;
+ return ret;
}
return setup_gsi_xfer(xfer, mas, slv, spi);
}
@@ -823,39 +909,70 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
spin_lock(&mas->lock);
- if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
- geni_spi_handle_rx(mas);
-
- if (m_irq & M_TX_FIFO_WATERMARK_EN)
- geni_spi_handle_tx(mas);
-
- if (m_irq & M_CMD_DONE_EN) {
- if (mas->cur_xfer) {
+ if (mas->cur_xfer_mode == GENI_SE_FIFO) {
+ if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
+ geni_spi_handle_rx(mas);
+
+ if (m_irq & M_TX_FIFO_WATERMARK_EN)
+ geni_spi_handle_tx(mas);
+
+ if (m_irq & M_CMD_DONE_EN) {
+ if (mas->cur_xfer) {
+ spi_finalize_current_transfer(spi);
+ mas->cur_xfer = NULL;
+ /*
+ * If this happens, then a CMD_DONE came before all the
+ * Tx buffer bytes were sent out. This is unusual, log
+ * this condition and disable the WM interrupt to
+ * prevent the system from stalling due an interrupt
+ * storm.
+ *
+ * If this happens when all Rx bytes haven't been
+ * received, log the condition. The only known time
+ * this can happen is if bits_per_word != 8 and some
+ * registers that expect xfer lengths in num spi_words
+ * weren't written correctly.
+ */
+ if (mas->tx_rem_bytes) {
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
+ mas->tx_rem_bytes, mas->cur_bits_per_word);
+ }
+ if (mas->rx_rem_bytes)
+ dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
+ mas->rx_rem_bytes, mas->cur_bits_per_word);
+ } else {
+ complete(&mas->cs_done);
+ }
+ }
+ } else if (mas->cur_xfer_mode == GENI_SE_DMA) {
+ const struct spi_transfer *xfer = mas->cur_xfer;
+ u32 dma_tx_status = readl_relaxed(se->base + SE_DMA_TX_IRQ_STAT);
+ u32 dma_rx_status = readl_relaxed(se->base + SE_DMA_RX_IRQ_STAT);
+
+ if (dma_tx_status)
+ writel(dma_tx_status, se->base + SE_DMA_TX_IRQ_CLR);
+ if (dma_rx_status)
+ writel(dma_rx_status, se->base + SE_DMA_RX_IRQ_CLR);
+ if (dma_tx_status & TX_DMA_DONE)
+ mas->tx_rem_bytes = 0;
+ if (dma_rx_status & RX_DMA_DONE)
+ mas->rx_rem_bytes = 0;
+ if (dma_tx_status & TX_RESET_DONE)
+ complete(&mas->tx_reset_done);
+ if (dma_rx_status & RX_RESET_DONE)
+ complete(&mas->rx_reset_done);
+ if (!mas->tx_rem_bytes && !mas->rx_rem_bytes && xfer) {
+ if (xfer->tx_buf && mas->tx_se_dma) {
+ geni_se_tx_dma_unprep(se, mas->tx_se_dma, xfer->len);
+ mas->tx_se_dma = 0;
+ }
+ if (xfer->rx_buf && mas->rx_se_dma) {
+ geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
+ mas->rx_se_dma = 0;
+ }
spi_finalize_current_transfer(spi);
mas->cur_xfer = NULL;
- /*
- * If this happens, then a CMD_DONE came before all the
- * Tx buffer bytes were sent out. This is unusual, log
- * this condition and disable the WM interrupt to
- * prevent the system from stalling due an interrupt
- * storm.
- *
- * If this happens when all Rx bytes haven't been
- * received, log the condition. The only known time
- * this can happen is if bits_per_word != 8 and some
- * registers that expect xfer lengths in num spi_words
- * weren't written correctly.
- */
- if (mas->tx_rem_bytes) {
- writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
- dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
- mas->tx_rem_bytes, mas->cur_bits_per_word);
- }
- if (mas->rx_rem_bytes)
- dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
- mas->rx_rem_bytes, mas->cur_bits_per_word);
- } else {
- complete(&mas->cs_done);
}
}
@@ -949,6 +1066,8 @@ static int spi_geni_probe(struct platform_device *pdev)
init_completion(&mas->cs_done);
init_completion(&mas->cancel_done);
init_completion(&mas->abort_done);
+ init_completion(&mas->tx_reset_done);
+ init_completion(&mas->rx_reset_done);
spin_lock_init(&mas->lock);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index f0d532ea40e8..4d69e320d018 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -60,12 +60,12 @@ static int intel_spi_pci_probe(struct pci_dev *pdev,
}
static const struct pci_device_id intel_spi_pci_ids[] = {
- { PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x1bca), (unsigned long)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x38a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
@@ -75,11 +75,14 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
- { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x9d24), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x9da4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa2a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
- { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
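
The (unsigned long) casts in the table above stash a pointer to the per-family board info in each entry's .driver_data field; the change simply moves several IDs from the bxt_info variant to cnl_info. As a reminder of how such per-ID data is usually recovered, here is a generic, hedged sketch: example_info and example_pci_probe are illustrative names, not this driver's.

#include <linux/errno.h>
#include <linux/pci.h>

struct example_info {			/* stand-in for a per-family info struct */
	unsigned int type;
};

static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	/* Recover the pointer the ID table stored as an unsigned long. */
	const struct example_info *info =
		(const struct example_info *)id->driver_data;

	return info ? 0 : -ENODEV;
}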
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index f619212b0d5c..f4679868c49f 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -104,7 +104,7 @@
#define BXT_PR 0x84
#define BXT_SSFSTS_CTL 0xa0
#define BXT_FREG_NUM 12
-#define BXT_PR_NUM 6
+#define BXT_PR_NUM 5
#define CNL_PR 0x84
#define CNL_FREG_NUM 6
@@ -1368,14 +1368,14 @@ static int intel_spi_populate_chip(struct intel_spi *ispi)
if (!spi_new_device(ispi->master, &chip))
return -ENODEV;
- /* Add the second chip if present */
- if (ispi->master->num_chipselect < 2)
- return 0;
-
ret = intel_spi_read_desc(ispi);
if (ret)
return ret;
+ /* Add the second chip if present */
+ if (ispi->master->num_chipselect < 2)
+ return 0;
+
chip.platform_data = NULL;
chip.chip_select = 1;
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index dd7de8fa37d0..313106eb8d40 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -71,6 +71,11 @@ module_param(check_ranges, int, 0644);
MODULE_PARM_DESC(check_ranges,
"checks rx_buffer pattern are valid");
+static unsigned int delay_ms = 100;
+module_param(delay_ms, uint, 0644);
+MODULE_PARM_DESC(delay_ms,
+ "delay between tests, in milliseconds (default: 100)");
+
/* the actual tests to execute */
static struct spi_test spi_tests[] = {
{
@@ -1098,7 +1103,8 @@ int spi_test_run_tests(struct spi_device *spi,
* detect the individual tests when using a logic analyzer
* we also add scheduling to avoid potential spi_timeouts...
*/
- mdelay(100);
+ if (delay_ms)
+ mdelay(delay_ms);
schedule();
}
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 0c79193d9697..701838b6f0c4 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -325,7 +325,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
if (!spi_mem_internal_supports_op(mem, op))
return -ENOTSUPP;
- if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
+ if (ctlr->mem_ops && ctlr->mem_ops->exec_op && !mem->spi->cs_gpiod) {
ret = spi_mem_access_start(mem);
if (ret)
return ret;
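
The one-line change above relaxes spi_mem_exec_op() so a controller may register mem_ops without an exec_op callback; in that case the core falls back to emulating the operation with plain SPI transfers, just as it does for controllers with no mem_ops at all. A condensed sketch of that dispatch decision follows; fallback_exec_op() is a hypothetical helper name, since upstream builds the equivalent spi_transfer list inline.

#include <linux/spi/spi-mem.h>

static int fallback_exec_op(struct spi_mem *mem, const struct spi_mem_op *op);

static int exec_op_dispatch(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/* Accelerated path only when the controller really implements it. */
	if (ctlr->mem_ops && ctlr->mem_ops->exec_op && !mem->spi->cs_gpiod)
		return ctlr->mem_ops->exec_op(mem, op);

	/* Otherwise emulate the op with ordinary SPI transfers. */
	return fallback_exec_op(mem, op);
}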
diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c
index fa8412ba20e2..f3f95eb37365 100644
--- a/drivers/spi/spi-mtk-snfi.c
+++ b/drivers/spi/spi-mtk-snfi.c
@@ -195,6 +195,8 @@
#define DATA_READ_MODE_X4 2
#define DATA_READ_MODE_DUAL 5
#define DATA_READ_MODE_QUAD 6
+#define DATA_READ_LATCH_LAT GENMASK(9, 8)
+#define DATA_READ_LATCH_LAT_S 8
#define PG_LOAD_CUSTOM_EN BIT(7)
#define DATARD_CUSTOM_EN BIT(6)
#define CS_DESELECT_CYC_S 0
@@ -205,6 +207,9 @@
#define SNF_DLY_CTL3 0x548
#define SFCK_SAM_DLY_S 0
+#define SFCK_SAM_DLY GENMASK(5, 0)
+#define SFCK_SAM_DLY_TOTAL 9
+#define SFCK_SAM_DLY_RANGE 47
#define SNF_STA_CTL1 0x550
#define CUS_PG_DONE BIT(28)
@@ -297,6 +302,7 @@ struct mtk_snand {
struct device *dev;
struct clk *nfi_clk;
struct clk *pad_clk;
+ struct clk *nfi_hclk;
void __iomem *nfi_base;
int irq;
struct completion op_done;
@@ -1339,7 +1345,16 @@ static int mtk_snand_enable_clk(struct mtk_snand *ms)
dev_err(ms->dev, "unable to enable pad clk\n");
goto err1;
}
+ ret = clk_prepare_enable(ms->nfi_hclk);
+ if (ret) {
+ dev_err(ms->dev, "unable to enable nfi hclk\n");
+ goto err2;
+ }
+
return 0;
+
+err2:
+ clk_disable_unprepare(ms->pad_clk);
err1:
clk_disable_unprepare(ms->nfi_clk);
return ret;
@@ -1347,6 +1362,7 @@ err1:
static void mtk_snand_disable_clk(struct mtk_snand *ms)
{
+ clk_disable_unprepare(ms->nfi_hclk);
clk_disable_unprepare(ms->pad_clk);
clk_disable_unprepare(ms->nfi_clk);
}
@@ -1357,6 +1373,8 @@ static int mtk_snand_probe(struct platform_device *pdev)
const struct of_device_id *dev_id;
struct spi_controller *ctlr;
struct mtk_snand *ms;
+ unsigned long spi_freq;
+ u32 val = 0;
int ret;
dev_id = of_match_node(mtk_snand_ids, np);
@@ -1401,6 +1419,13 @@ static int mtk_snand_probe(struct platform_device *pdev)
goto release_ecc;
}
+ ms->nfi_hclk = devm_clk_get_optional(&pdev->dev, "nfi_hclk");
+ if (IS_ERR(ms->nfi_hclk)) {
+ ret = PTR_ERR(ms->nfi_hclk);
+ dev_err(&pdev->dev, "unable to get nfi_hclk, err = %d\n", ret);
+ goto release_ecc;
+ }
+
ret = mtk_snand_enable_clk(ms);
if (ret)
goto release_ecc;
@@ -1428,10 +1453,22 @@ static int mtk_snand_probe(struct platform_device *pdev)
// switch to SNFI mode
nfi_write32(ms, SNF_CFG, SPI_MODE);
+ ret = of_property_read_u32(np, "rx-sample-delay-ns", &val);
+ if (!ret)
+ nfi_rmw32(ms, SNF_DLY_CTL3, SFCK_SAM_DLY,
+ val * SFCK_SAM_DLY_RANGE / SFCK_SAM_DLY_TOTAL);
+
+ ret = of_property_read_u32(np, "mediatek,rx-latch-latency-ns", &val);
+ if (!ret) {
+ spi_freq = clk_get_rate(ms->pad_clk);
+ val = DIV_ROUND_CLOSEST(val, NSEC_PER_SEC / spi_freq);
+ nfi_rmw32(ms, SNF_MISC_CTL, DATA_READ_LATCH_LAT,
+ val << DATA_READ_LATCH_LAT_S);
+ }
+
// setup an initial page format for ops matching page_cache_op template
// before ECC is called.
- ret = mtk_snand_setup_pagefmt(ms, ms->caps->sector_size,
- ms->caps->spare_sizes[0]);
+ ret = mtk_snand_setup_pagefmt(ms, SZ_2K, SZ_64);
if (ret) {
dev_err(ms->dev, "failed to set initial page format\n");
goto disable_clk;
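
The two new DT properties are converted from nanoseconds to register values in different ways: rx-sample-delay-ns is scaled into the 0..SFCK_SAM_DLY_RANGE tap range (47/9 per the new defines), while mediatek,rx-latch-latency-ns is rounded to whole pad-clock cycles with DIV_ROUND_CLOSEST(). A worked example with hypothetical inputs (a 100 MHz pad clock, 2 ns sample delay, 25 ns latch latency; none of these numbers come from the patch):

#include <linux/math.h>		/* DIV_ROUND_CLOSEST() */
#include <linux/time64.h>	/* NSEC_PER_SEC */
#include <linux/types.h>

static void snfi_delay_example(void)
{
	unsigned long spi_freq = 100000000UL;	/* assumed pad_clk rate: 100 MHz */
	u32 sample_ns = 2, latch_ns = 25;	/* assumed DT values */

	/* rx-sample-delay-ns: ns * SFCK_SAM_DLY_RANGE / SFCK_SAM_DLY_TOTAL */
	u32 sam_dly = sample_ns * 47 / 9;	/* 2 * 47 / 9 = 10 taps */

	/* rx-latch-latency-ns: round to pad-clock cycles (10 ns each here). */
	u32 latch_cycles = DIV_ROUND_CLOSEST(latch_ns, NSEC_PER_SEC / spi_freq);
	/* (25 + 5) / 10 = 3 cycles, shifted into DATA_READ_LATCH_LAT. */

	(void)sam_dly;
	(void)latch_cycles;
}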
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index e4484ace584e..a17ff839117f 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2091,7 +2091,6 @@ pl022_platform_data_dt_get(struct device *dev)
return NULL;
pd->bus_id = -1;
- pd->enable_dma = 1;
of_property_read_u32(np, "pl022,autosuspend-delay",
&pd->autosuspend_delay);
pd->rt = of_property_read_bool(np, "pl022,rt");
diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
index 47cbe73137c2..dc188f9202c9 100644
--- a/drivers/spi/spi-synquacer.c
+++ b/drivers/spi/spi-synquacer.c
@@ -472,10 +472,9 @@ static int synquacer_spi_transfer_one(struct spi_master *master,
read_fifo(sspi);
}
- if (status < 0) {
- dev_err(sspi->dev, "failed to transfer. status: 0x%x\n",
- status);
- return status;
+ if (status == 0) {
+ dev_err(sspi->dev, "failed to transfer. Timeout.\n");
+ return -ETIMEDOUT;
}
return 0;
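
The fix above matters because the status under test comes from wait_for_completion_timeout(), which returns an unsigned remaining-jiffies count: 0 on timeout, strictly positive on completion, and never a negative error code, so the old "status < 0" branch could not fire. A minimal sketch of the idiomatic pattern, with an illustrative completion and timeout:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_xfer_done(struct completion *xfer_done)
{
	unsigned long left = wait_for_completion_timeout(xfer_done,
							 msecs_to_jiffies(100));
	if (!left)
		return -ETIMEDOUT;	/* timed out: report it explicitly */

	return 0;			/* completed with time to spare */
}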
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 7377d3b81302..1411548f4255 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -83,7 +83,7 @@ struct xilinx_spi {
void __iomem *regs; /* virt. address of the control registers */
int irq;
-
+ bool force_irq; /* force irq to setup master inhibit */
u8 *rx_ptr;		/* pointer in the Rx buffer */
const u8 *tx_ptr;	/* pointer in the Tx buffer */
u8 bytes_per_word;
@@ -248,7 +248,8 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
xspi->rx_ptr = t->rx_buf;
remaining_words = t->len / xspi->bytes_per_word;
- if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) {
+ if (xspi->irq >= 0 &&
+ (xspi->force_irq || remaining_words > xspi->buffer_size)) {
u32 isr;
use_irq = true;
/* Inhibit irq to avoid spurious irqs on tx_empty*/
@@ -393,6 +394,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
struct resource *res;
int ret, num_cs = 0, bits_per_word;
struct spi_master *master;
+ bool force_irq = false;
u32 tmp;
u8 i;
@@ -400,6 +402,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
if (pdata) {
num_cs = pdata->num_chipselect;
bits_per_word = pdata->bits_per_word;
+ force_irq = pdata->force_irq;
} else {
of_property_read_u32(pdev->dev.of_node, "xlnx,num-ss-bits",
&num_cs);
@@ -477,6 +480,8 @@ static int xilinx_spi_probe(struct platform_device *pdev)
dev_name(&pdev->dev), xspi);
if (ret)
return ret;
+
+ xspi->force_irq = force_irq;
}
/* SPI controller initializations */
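
The new force_irq flag lets platform data request interrupt-driven transfers even when a transfer would fit in the FIFO, which is the QSPI case this hunk enables. A hedged sketch of how a board file might opt in is below; it assumes the matching force_irq member was added to struct xspi_platform_data in include/linux/spi/xilinx_spi.h, which lies outside this drivers/spi diffstat.

#include <linux/spi/xilinx_spi.h>

/* Hypothetical board-level platform data opting into forced IRQ mode. */
static struct xspi_platform_data example_xspi_pdata = {
	.num_chipselect	= 1,
	.bits_per_word	= 8,
	.force_irq	= true,		/* always run transfers via the ISR */
};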
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 3f33934f5429..5866bf5813a4 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -604,7 +604,7 @@ static void spi_dev_set_name(struct spi_device *spi)
}
dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
- spi->chip_select);
+ spi_get_chipselect(spi, 0));
}
static int spi_dev_check(struct device *dev, void *data)
@@ -613,7 +613,7 @@ static int spi_dev_check(struct device *dev, void *data)
struct spi_device *new_spi = data;
if (spi->controller == new_spi->controller &&
- spi->chip_select == new_spi->chip_select)
+ spi_get_chipselect(spi, 0) == spi_get_chipselect(new_spi, 0))
return -EBUSY;
return 0;
}
@@ -638,7 +638,7 @@ static int __spi_add_device(struct spi_device *spi)
status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
if (status) {
dev_err(dev, "chipselect %d already in use\n",
- spi->chip_select);
+ spi_get_chipselect(spi, 0));
return status;
}
@@ -649,7 +649,7 @@ static int __spi_add_device(struct spi_device *spi)
}
if (ctlr->cs_gpiods)
- spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
+ spi_set_csgpiod(spi, 0, ctlr->cs_gpiods[spi_get_chipselect(spi, 0)]);
/*
* Drivers may modify this initial i/o setup, but will
@@ -692,8 +692,8 @@ int spi_add_device(struct spi_device *spi)
int status;
/* Chipselects are numbered 0..max; validate. */
- if (spi->chip_select >= ctlr->num_chipselect) {
- dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
+ if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
+ dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
ctlr->num_chipselect);
return -EINVAL;
}
@@ -714,8 +714,8 @@ static int spi_add_device_locked(struct spi_device *spi)
struct device *dev = ctlr->dev.parent;
/* Chipselects are numbered 0..max; validate. */
- if (spi->chip_select >= ctlr->num_chipselect) {
- dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
+ if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
+ dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
ctlr->num_chipselect);
return -EINVAL;
}
@@ -761,7 +761,7 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
- proxy->chip_select = chip->chip_select;
+ spi_set_chipselect(proxy, 0, chip->chip_select);
proxy->max_speed_hz = chip->max_speed_hz;
proxy->mode = chip->mode;
proxy->irq = chip->irq;
@@ -970,24 +970,23 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
* Avoid calling into the driver (or doing delays) if the chip select
* isn't actually changing from the last time this was called.
*/
- if (!force && ((enable && spi->controller->last_cs == spi->chip_select) ||
- (!enable && spi->controller->last_cs != spi->chip_select)) &&
+ if (!force && ((enable && spi->controller->last_cs == spi_get_chipselect(spi, 0)) ||
+ (!enable && spi->controller->last_cs != spi_get_chipselect(spi, 0))) &&
(spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
return;
trace_spi_set_cs(spi, activate);
- spi->controller->last_cs = enable ? spi->chip_select : -1;
+ spi->controller->last_cs = enable ? spi_get_chipselect(spi, 0) : -1;
spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
- if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate) {
+ if ((spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) && !activate)
spi_delay_exec(&spi->cs_hold, NULL);
- }
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
- if (spi->cs_gpiod) {
+ if (spi_get_csgpiod(spi, 0)) {
if (!(spi->mode & SPI_NO_CS)) {
/*
* Historically ACPI has no means of the GPIO polarity and
@@ -1000,10 +999,10 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
* into account.
*/
if (has_acpi_companion(&spi->dev))
- gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), !enable);
else
/* Polarity handled by GPIO library */
- gpiod_set_value_cansleep(spi->cs_gpiod, activate);
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), activate);
}
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
@@ -1013,7 +1012,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
spi->controller->set_cs(spi, !enable);
}
- if (spi->cs_gpiod || !spi->controller->set_cs_timing) {
+ if (spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) {
if (activate)
spi_delay_exec(&spi->cs_setup, NULL);
else
@@ -1484,6 +1483,13 @@ static void _spi_transfer_cs_change_delay(struct spi_message *msg,
}
}
+void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ _spi_transfer_cs_change_delay(msg, xfer);
+}
+EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);
+
/*
* spi_transfer_one_message - Default implementation of transfer_one_message()
*
@@ -1921,7 +1927,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
/* Capture the resolution of the timestamp */
xfer->ptp_sts_word_post = progress;
- xfer->timestamped = true;
+ xfer->timestamped = 1;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
@@ -2319,7 +2325,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
nc, rc);
return rc;
}
- spi->chip_select = value;
+ spi_set_chipselect(spi, 0, value);
/* Device speed */
if (!of_property_read_u32(nc, "spi-max-frequency", &value))
@@ -2327,6 +2333,8 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
/* Device CS delays */
of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
+ of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
+ of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
return 0;
}
@@ -2436,7 +2444,7 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
/* Use provided chip-select for ancillary device */
- ancillary->chip_select = chip_select;
+ spi_set_chipselect(ancillary, 0, chip_select);
/* Take over SPI mode/speed from SPI main device */
ancillary->max_speed_hz = spi->max_speed_hz;
@@ -2683,7 +2691,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
spi->mode |= lookup.mode;
spi->irq = lookup.irq;
spi->bits_per_word = lookup.bits_per_word;
- spi->chip_select = lookup.chip_select;
+ spi_set_chipselect(spi, 0, lookup.chip_select);
return spi;
}
@@ -3064,15 +3072,14 @@ static int spi_controller_check_ops(struct spi_controller *ctlr)
* The controller may implement only the high-level SPI-memory like
* operations if it does not support regular SPI transfers, and this is
* valid use case.
- * If ->mem_ops is NULL, we request that at least one of the
- * ->transfer_xxx() method be implemented.
+ * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
+ * one of the ->transfer_xxx() method be implemented.
*/
- if (ctlr->mem_ops) {
- if (!ctlr->mem_ops->exec_op)
- return -EINVAL;
- } else if (!ctlr->transfer && !ctlr->transfer_one &&
+ if (!ctlr->mem_ops || (ctlr->mem_ops && !ctlr->mem_ops->exec_op)) {
+ if (!ctlr->transfer && !ctlr->transfer_one &&
!ctlr->transfer_one_message) {
- return -EINVAL;
+ return -EINVAL;
+ }
}
return 0;
@@ -3645,7 +3652,7 @@ static int spi_set_cs_timing(struct spi_device *spi)
struct device *parent = spi->controller->dev.parent;
int status = 0;
- if (spi->controller->set_cs_timing && !spi->cs_gpiod) {
+ if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
if (spi->controller->auto_runtime_pm) {
status = pm_runtime_get_sync(parent);
if (status < 0) {
@@ -3850,7 +3857,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
* cs_change is set for each transfer.
*/
if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
- spi->cs_gpiod)) {
+ spi_get_csgpiod(spi, 0))) {
size_t maxsize;
int ret;
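
Most of the spi.c churn above is mechanical: direct accesses to spi->chip_select and spi->cs_gpiod are routed through the new spi_get_chipselect()/spi_set_chipselect() and spi_get_csgpiod()/spi_set_csgpiod() helpers, all taking an index that is always 0 for now, so that a later series can grow the fields into per-device arrays for multi-chip-select peripherals. A hedged before/after sketch from a client driver's point of view; example_check_cs() is not an upstream function.

#include <linux/errno.h>
#include <linux/spi/spi.h>

static int example_check_cs(struct spi_device *spi)
{
	/* old style (single chip select, direct field access):
	 *	if (spi->chip_select >= spi->controller->num_chipselect)
	 *		return -EINVAL;
	 */

	/* new style: index 0 today, ready for multiple chip selects later */
	if (spi_get_chipselect(spi, 0) >= spi->controller->num_chipselect)
		return -EINVAL;

	return 0;
}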
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index a1ea093795cf..5a038c667401 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -116,7 +116,6 @@ spidev_sync(struct spidev_data *spidev, struct spi_message *message)
status = spidev_sync_unlocked(spi, message);
mutex_unlock(&spidev->spi_lock);
-
return status;
}
@@ -712,6 +711,8 @@ static const struct spi_device_id spidev_spi_ids[] = {
{ .name = "m53cpld" },
{ .name = "spi-petra" },
{ .name = "spi-authenta" },
+ { .name = "em3581" },
+ { .name = "si3210" },
{},
};
MODULE_DEVICE_TABLE(spi, spidev_spi_ids);
@@ -730,14 +731,16 @@ static int spidev_of_check(struct device *dev)
}
static const struct of_device_id spidev_dt_ids[] = {
- { .compatible = "rohm,dh2228fv", .data = &spidev_of_check },
+ { .compatible = "cisco,spi-petra", .data = &spidev_of_check },
+ { .compatible = "dh,dhcom-board", .data = &spidev_of_check },
{ .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
- { .compatible = "semtech,sx1301", .data = &spidev_of_check },
{ .compatible = "lwn,bk4", .data = &spidev_of_check },
- { .compatible = "dh,dhcom-board", .data = &spidev_of_check },
{ .compatible = "menlo,m53cpld", .data = &spidev_of_check },
- { .compatible = "cisco,spi-petra", .data = &spidev_of_check },
{ .compatible = "micron,spi-authenta", .data = &spidev_of_check },
+ { .compatible = "rohm,dh2228fv", .data = &spidev_of_check },
+ { .compatible = "semtech,sx1301", .data = &spidev_of_check },
+ { .compatible = "silabs,em3581", .data = &spidev_of_check },
+ { .compatible = "silabs,si3210", .data = &spidev_of_check },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);