author     Linus Torvalds <torvalds@linux-foundation.org>  2010-02-25 15:38:03 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-02-25 15:38:03 -0800
commit     d7930c9ef9cc67044f5ddaac54d06ca22645a012 (patch)
tree       32ec9ed98a7b5ff659de11886492b18abd421069
parent     b6d97026561a6ed6eed58428633a6bb4e1b78c57 (diff)
parent     4f4517c45f325ba511458465430a52864a5d0d30 (diff)
Merge branch 'next-spi' of git://git.secretlab.ca/git/linux-2.6
* 'next-spi' of git://git.secretlab.ca/git/linux-2.6: (31 commits)
  spi: Correct SPI clock frequency setting in spi_mpc8xxx
  spi/spi_s3c64xx.c: Fix continuation line formats
  spi/dw_spi: Fix dw_spi_mmio to depend on HAVE_CLK
  spi/dw_spi: Allow dw_spi.c to be a module
  spi/dw_spi: mmio code style fixups
  Memory-mapped dw_spi driver
  spi/dw_spi: fix missing export of dw_spi_remove_host
  spi/dw_spi: conditional transfer mode changes
  spi/dw_spi: remove conditional from 'poll_transfer'.
  spi/dw_spi: fixed a spelling typo in a warning message.
  spi/dw_spi: add return value to empty mrst_spi_debugfs_init()
  spi/dw_spi: enable platform specific chipselect.
  spi/dw_spi: add a FIFO depth detection
  spi/dw_spi: fix __init/__devinit section mismatch
  spi: xilinx_spi: Fix up I/O routine wrapping bogosity.
  spi/spi_imx: add device information by switching pr_debug() to dev_dbg()
  spi: update MSIOF includes
  spi/dw_spi: refine the IRQ mode working flow
  spi/dw_spi: add a missed dw_spi_remove_host() in exit sequence
  spi/dw_spi: bug fix in wait_till_not_busy()
  ...
-rw-r--r--  drivers/spi/Kconfig             23
-rw-r--r--  drivers/spi/Makefile             3
-rw-r--r--  drivers/spi/coldfire_qspi.c    640
-rw-r--r--  drivers/spi/davinci_spi.c     1255
-rw-r--r--  drivers/spi/dw_spi.c           111
-rw-r--r--  drivers/spi/dw_spi_mmio.c      147
-rw-r--r--  drivers/spi/dw_spi_pci.c         2
-rw-r--r--  drivers/spi/mpc52xx_psc_spi.c    2
-rw-r--r--  drivers/spi/mpc52xx_spi.c        2
-rw-r--r--  drivers/spi/spi_imx.c            2
-rw-r--r--  drivers/spi/spi_mpc8xxx.c        8
-rw-r--r--  drivers/spi/spi_ppc4xx.c         2
-rw-r--r--  drivers/spi/spi_s3c64xx.c       89
-rw-r--r--  drivers/spi/spi_sh_msiof.c       2
-rw-r--r--  drivers/spi/spi_stmp.c           2
-rw-r--r--  drivers/spi/xilinx_spi.c        28
-rw-r--r--  drivers/spi/xilinx_spi_of.c      2
-rw-r--r--  include/linux/spi/dw_spi.h       5
18 files changed, 2225 insertions, 100 deletions
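
All of the controller drivers added or modified below plug into the generic SPI core, and client (protocol) drivers reach them only through the standard spi_message/spi_transfer API. For reference, a minimal, hypothetical consumer sketch using the synchronous spi_sync() path might look like the following; the command byte and buffer sizes are invented:

#include <linux/spi/spi.h>

/* Hypothetical protocol-driver snippet: one full-duplex transfer submitted
 * through the SPI core; the controller drivers below only see the resulting
 * spi_message (e.g. via coldfire_qspi's mcfqspi_transfer()). */
static int example_spi_xfer(struct spi_device *spi)
{
	u8 tx[4] = { 0x9f, 0, 0, 0 };	/* invented command bytes */
	u8 rx[4];
	struct spi_transfer t = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len    = sizeof(tx),
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);

	return spi_sync(spi, &m);	/* blocks until the master completes it */
}
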
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index f55eb0107336..0fee95cd9a49 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -100,6 +100,23 @@ config SPI_BUTTERFLY
inexpensive battery powered microcontroller evaluation board.
This same cable can be used to flash new firmware.
+config SPI_COLDFIRE_QSPI
+ tristate "Freescale Coldfire QSPI controller"
+ depends on (M520x || M523x || M5249 || M527x || M528x || M532x)
+ help
+ This enables support for the Coldfire QSPI controller in master
+ mode.
+
+ This driver can also be built as a module. If so, the module
+ will be called coldfire_qspi.
+
+config SPI_DAVINCI
+ tristate "SPI controller driver for DaVinci/DA8xx SoC's"
+ depends on SPI_MASTER && ARCH_DAVINCI
+ select SPI_BITBANG
+ help
+ SPI master controller for DaVinci and DA8xx SPI modules.
+
config SPI_GPIO
tristate "GPIO-based bitbanging SPI Master"
depends on GENERIC_GPIO
@@ -308,7 +325,7 @@ config SPI_NUC900
#
config SPI_DESIGNWARE
- bool "DesignWare SPI controller core support"
+ tristate "DesignWare SPI controller core support"
depends on SPI_MASTER
help
general driver for SPI controller core from DesignWare
@@ -317,6 +334,10 @@ config SPI_DW_PCI
tristate "PCI interface driver for DW SPI core"
depends on SPI_DESIGNWARE && PCI
+config SPI_DW_MMIO
+ tristate "Memory-mapped io interface driver for DW SPI core"
+ depends on SPI_DESIGNWARE && HAVE_CLK
+
#
# There are lots of SPI device types, with sensors and memory
# being probably the most widely used ones.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index f3d2810ba11c..d7d0f89b797b 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -16,8 +16,11 @@ obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
+obj-$(CONFIG_SPI_COLDFIRE_QSPI) += coldfire_qspi.o
+obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o
obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o
obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o
+obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o
obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
obj-$(CONFIG_SPI_IMX) += spi_imx.o
obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c
new file mode 100644
index 000000000000..59be3efe0636
--- /dev/null
+++ b/drivers/spi/coldfire_qspi.c
@@ -0,0 +1,640 @@
+/*
+ * Freescale/Motorola Coldfire Queued SPI driver
+ *
+ * Copyright 2010 Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
+ *
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfqspi.h>
+
+#define DRIVER_NAME "mcfqspi"
+
+#define MCFQSPI_BUSCLK (MCF_BUSCLK / 2)
+
+#define MCFQSPI_QMR 0x00
+#define MCFQSPI_QMR_MSTR 0x8000
+#define MCFQSPI_QMR_CPOL 0x0200
+#define MCFQSPI_QMR_CPHA 0x0100
+#define MCFQSPI_QDLYR 0x04
+#define MCFQSPI_QDLYR_SPE 0x8000
+#define MCFQSPI_QWR 0x08
+#define MCFQSPI_QWR_HALT 0x8000
+#define MCFQSPI_QWR_WREN 0x4000
+#define MCFQSPI_QWR_CSIV 0x1000
+#define MCFQSPI_QIR 0x0C
+#define MCFQSPI_QIR_WCEFB 0x8000
+#define MCFQSPI_QIR_ABRTB 0x4000
+#define MCFQSPI_QIR_ABRTL 0x1000
+#define MCFQSPI_QIR_WCEFE 0x0800
+#define MCFQSPI_QIR_ABRTE 0x0400
+#define MCFQSPI_QIR_SPIFE 0x0100
+#define MCFQSPI_QIR_WCEF 0x0008
+#define MCFQSPI_QIR_ABRT 0x0004
+#define MCFQSPI_QIR_SPIF 0x0001
+#define MCFQSPI_QAR 0x010
+#define MCFQSPI_QAR_TXBUF 0x00
+#define MCFQSPI_QAR_RXBUF 0x10
+#define MCFQSPI_QAR_CMDBUF 0x20
+#define MCFQSPI_QDR 0x014
+#define MCFQSPI_QCR 0x014
+#define MCFQSPI_QCR_CONT 0x8000
+#define MCFQSPI_QCR_BITSE 0x4000
+#define MCFQSPI_QCR_DT 0x2000
+
+struct mcfqspi {
+ void __iomem *iobase;
+ int irq;
+ struct clk *clk;
+ struct mcfqspi_cs_control *cs_control;
+
+ wait_queue_head_t waitq;
+
+ struct work_struct work;
+ struct workqueue_struct *workq;
+ spinlock_t lock;
+ struct list_head msgq;
+};
+
+static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QMR);
+}
+
+static void mcfqspi_wr_qdlyr(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QDLYR);
+}
+
+static u16 mcfqspi_rd_qdlyr(struct mcfqspi *mcfqspi)
+{
+ return readw(mcfqspi->iobase + MCFQSPI_QDLYR);
+}
+
+static void mcfqspi_wr_qwr(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QWR);
+}
+
+static void mcfqspi_wr_qir(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QIR);
+}
+
+static void mcfqspi_wr_qar(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QAR);
+}
+
+static void mcfqspi_wr_qdr(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QDR);
+}
+
+static u16 mcfqspi_rd_qdr(struct mcfqspi *mcfqspi)
+{
+ return readw(mcfqspi->iobase + MCFQSPI_QDR);
+}
+
+static void mcfqspi_cs_select(struct mcfqspi *mcfqspi, u8 chip_select,
+ bool cs_high)
+{
+ mcfqspi->cs_control->select(mcfqspi->cs_control, chip_select, cs_high);
+}
+
+static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select,
+ bool cs_high)
+{
+ mcfqspi->cs_control->deselect(mcfqspi->cs_control, chip_select, cs_high);
+}
+
+static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi)
+{
+ return (mcfqspi->cs_control && mcfqspi->cs_control->setup) ?
+ mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0;
+}
+
+static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi)
+{
+ if (mcfqspi->cs_control && mcfqspi->cs_control->teardown)
+ mcfqspi->cs_control->teardown(mcfqspi->cs_control);
+}
+
+static u8 mcfqspi_qmr_baud(u32 speed_hz)
+{
+ return clamp((MCFQSPI_BUSCLK + speed_hz - 1) / speed_hz, 2u, 255u);
+}
+
+static bool mcfqspi_qdlyr_spe(struct mcfqspi *mcfqspi)
+{
+ return mcfqspi_rd_qdlyr(mcfqspi) & MCFQSPI_QDLYR_SPE;
+}
+
+static irqreturn_t mcfqspi_irq_handler(int this_irq, void *dev_id)
+{
+ struct mcfqspi *mcfqspi = dev_id;
+
+ /* clear interrupt */
+ mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE | MCFQSPI_QIR_SPIF);
+ wake_up(&mcfqspi->waitq);
+
+ return IRQ_HANDLED;
+}
+
+static void mcfqspi_transfer_msg8(struct mcfqspi *mcfqspi, unsigned count,
+ const u8 *txbuf, u8 *rxbuf)
+{
+ unsigned i, n, offset = 0;
+
+ n = min(count, 16u);
+
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF);
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE);
+
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF);
+ if (txbuf)
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, *txbuf++);
+ else
+ for (i = 0; i < count; ++i)
+ mcfqspi_wr_qdr(mcfqspi, 0);
+
+ count -= n;
+ if (count) {
+ u16 qwr = 0xf08;
+ mcfqspi_wr_qwr(mcfqspi, 0x700);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+
+ do {
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ mcfqspi_wr_qwr(mcfqspi, qwr);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi,
+ MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < 8; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ }
+ n = min(count, 8u);
+ if (txbuf) {
+ mcfqspi_wr_qar(mcfqspi,
+ MCFQSPI_QAR_TXBUF + offset);
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, *txbuf++);
+ }
+ qwr = (offset ? 0x808 : 0) + ((n - 1) << 8);
+ offset ^= 8;
+ count -= n;
+ } while (count);
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ mcfqspi_wr_qwr(mcfqspi, qwr);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < 8; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ offset ^= 8;
+ }
+ } else {
+ mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ }
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < n; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ }
+}
+
+static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count,
+ const u16 *txbuf, u16 *rxbuf)
+{
+ unsigned i, n, offset = 0;
+
+ n = min(count, 16u);
+
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF);
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE);
+
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF);
+ if (txbuf)
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, *txbuf++);
+ else
+ for (i = 0; i < count; ++i)
+ mcfqspi_wr_qdr(mcfqspi, 0);
+
+ count -= n;
+ if (count) {
+ u16 qwr = 0xf08;
+ mcfqspi_wr_qwr(mcfqspi, 0x700);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+
+ do {
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ mcfqspi_wr_qwr(mcfqspi, qwr);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi,
+ MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < 8; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ }
+ n = min(count, 8u);
+ if (txbuf) {
+ mcfqspi_wr_qar(mcfqspi,
+ MCFQSPI_QAR_TXBUF + offset);
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, *txbuf++);
+ }
+ qwr = (offset ? 0x808 : 0x000) + ((n - 1) << 8);
+ offset ^= 8;
+ count -= n;
+ } while (count);
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ mcfqspi_wr_qwr(mcfqspi, qwr);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < 8; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ offset ^= 8;
+ }
+ } else {
+ mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ }
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < n; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ }
+}
+
+static void mcfqspi_work(struct work_struct *work)
+{
+ struct mcfqspi *mcfqspi = container_of(work, struct mcfqspi, work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mcfqspi->lock, flags);
+ while (!list_empty(&mcfqspi->msgq)) {
+ struct spi_message *msg;
+ struct spi_device *spi;
+ struct spi_transfer *xfer;
+ int status = 0;
+
+ msg = container_of(mcfqspi->msgq.next, struct spi_message,
+ queue);
+
+ list_del_init(&mcfqspi->msgq);
+ spin_unlock_irqrestore(&mcfqspi->lock, flags);
+
+ spi = msg->spi;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ bool cs_high = spi->mode & SPI_CS_HIGH;
+ u16 qmr = MCFQSPI_QMR_MSTR;
+
+ if (xfer->bits_per_word)
+ qmr |= xfer->bits_per_word << 10;
+ else
+ qmr |= spi->bits_per_word << 10;
+ if (spi->mode & SPI_CPHA)
+ qmr |= MCFQSPI_QMR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ qmr |= MCFQSPI_QMR_CPOL;
+ if (xfer->speed_hz)
+ qmr |= mcfqspi_qmr_baud(xfer->speed_hz);
+ else
+ qmr |= mcfqspi_qmr_baud(spi->max_speed_hz);
+ mcfqspi_wr_qmr(mcfqspi, qmr);
+
+ mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high);
+
+ mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
+ if ((xfer->bits_per_word ? xfer->bits_per_word :
+ spi->bits_per_word) == 8)
+ mcfqspi_transfer_msg8(mcfqspi, xfer->len,
+ xfer->tx_buf,
+ xfer->rx_buf);
+ else
+ mcfqspi_transfer_msg16(mcfqspi, xfer->len / 2,
+ xfer->tx_buf,
+ xfer->rx_buf);
+ mcfqspi_wr_qir(mcfqspi, 0);
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+ if (xfer->cs_change) {
+ if (!list_is_last(&xfer->transfer_list,
+ &msg->transfers))
+ mcfqspi_cs_deselect(mcfqspi,
+ spi->chip_select,
+ cs_high);
+ } else {
+ if (list_is_last(&xfer->transfer_list,
+ &msg->transfers))
+ mcfqspi_cs_deselect(mcfqspi,
+ spi->chip_select,
+ cs_high);
+ }
+ msg->actual_length += xfer->len;
+ }
+ msg->status = status;
+ msg->complete(msg->context);
+
+ spin_lock_irqsave(&mcfqspi->lock, flags);
+ }
+ spin_unlock_irqrestore(&mcfqspi->lock, flags);
+}
+
+static int mcfqspi_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct mcfqspi *mcfqspi;
+ struct spi_transfer *xfer;
+ unsigned long flags;
+
+ mcfqspi = spi_master_get_devdata(spi->master);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (xfer->bits_per_word && ((xfer->bits_per_word < 8)
+ || (xfer->bits_per_word > 16))) {
+ dev_dbg(&spi->dev,
+ "%d bits per word is not supported\n",
+ xfer->bits_per_word);
+ goto fail;
+ }
+ if (xfer->speed_hz) {
+ u32 real_speed = MCFQSPI_BUSCLK /
+ mcfqspi_qmr_baud(xfer->speed_hz);
+ if (real_speed != xfer->speed_hz)
+ dev_dbg(&spi->dev,
+ "using speed %d instead of %d\n",
+ real_speed, xfer->speed_hz);
+ }
+ }
+ msg->status = -EINPROGRESS;
+ msg->actual_length = 0;
+
+ spin_lock_irqsave(&mcfqspi->lock, flags);
+ list_add_tail(&msg->queue, &mcfqspi->msgq);
+ queue_work(mcfqspi->workq, &mcfqspi->work);
+ spin_unlock_irqrestore(&mcfqspi->lock, flags);
+
+ return 0;
+fail:
+ msg->status = -EINVAL;
+ return -EINVAL;
+}
+
+static int mcfqspi_setup(struct spi_device *spi)
+{
+ if ((spi->bits_per_word < 8) || (spi->bits_per_word > 16)) {
+ dev_dbg(&spi->dev, "%d bits per word is not supported\n",
+ spi->bits_per_word);
+ return -EINVAL;
+ }
+ if (spi->chip_select >= spi->master->num_chipselect) {
+ dev_dbg(&spi->dev, "%d chip select is out of range\n",
+ spi->chip_select);
+ return -EINVAL;
+ }
+
+ mcfqspi_cs_deselect(spi_master_get_devdata(spi->master),
+ spi->chip_select, spi->mode & SPI_CS_HIGH);
+
+ dev_dbg(&spi->dev,
+ "bits per word %d, chip select %d, speed %d KHz\n",
+ spi->bits_per_word, spi->chip_select,
+ (MCFQSPI_BUSCLK / mcfqspi_qmr_baud(spi->max_speed_hz))
+ / 1000);
+
+ return 0;
+}
+
+static int __devinit mcfqspi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct mcfqspi *mcfqspi;
+ struct resource *res;
+ struct mcfqspi_platform_data *pdata;
+ int status;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi));
+ if (master == NULL) {
+ dev_dbg(&pdev->dev, "spi_alloc_master failed\n");
+ return -ENOMEM;
+ }
+
+ mcfqspi = spi_master_get_devdata(master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_dbg(&pdev->dev, "platform_get_resource failed\n");
+ status = -ENXIO;
+ goto fail0;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ dev_dbg(&pdev->dev, "request_mem_region failed\n");
+ status = -EBUSY;
+ goto fail0;
+ }
+
+ mcfqspi->iobase = ioremap(res->start, resource_size(res));
+ if (!mcfqspi->iobase) {
+ dev_dbg(&pdev->dev, "ioremap failed\n");
+ status = -ENOMEM;
+ goto fail1;
+ }
+
+ mcfqspi->irq = platform_get_irq(pdev, 0);
+ if (mcfqspi->irq < 0) {
+ dev_dbg(&pdev->dev, "platform_get_irq failed\n");
+ status = -ENXIO;
+ goto fail2;
+ }
+
+ status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, IRQF_DISABLED,
+ pdev->name, mcfqspi);
+ if (status) {
+ dev_dbg(&pdev->dev, "request_irq failed\n");
+ goto fail2;
+ }
+
+ mcfqspi->clk = clk_get(&pdev->dev, "qspi_clk");
+ if (IS_ERR(mcfqspi->clk)) {
+ dev_dbg(&pdev->dev, "clk_get failed\n");
+ status = PTR_ERR(mcfqspi->clk);
+ goto fail3;
+ }
+ clk_enable(mcfqspi->clk);
+
+ mcfqspi->workq = create_singlethread_workqueue(dev_name(master->dev.parent));
+ if (!mcfqspi->workq) {
+ dev_dbg(&pdev->dev, "create_workqueue failed\n");
+ status = -ENOMEM;
+ goto fail4;
+ }
+ INIT_WORK(&mcfqspi->work, mcfqspi_work);
+ spin_lock_init(&mcfqspi->lock);
+ INIT_LIST_HEAD(&mcfqspi->msgq);
+ init_waitqueue_head(&mcfqspi->waitq);
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_dbg(&pdev->dev, "platform data is missing\n");
+ goto fail5;
+ }
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->num_chipselect;
+
+ mcfqspi->cs_control = pdata->cs_control;
+ status = mcfqspi_cs_setup(mcfqspi);
+ if (status) {
+ dev_dbg(&pdev->dev, "error initializing cs_control\n");
+ goto fail5;
+ }
+
+ master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
+ master->setup = mcfqspi_setup;
+ master->transfer = mcfqspi_transfer;
+
+ platform_set_drvdata(pdev, master);
+
+ status = spi_register_master(master);
+ if (status) {
+ dev_dbg(&pdev->dev, "spi_register_master failed\n");
+ goto fail6;
+ }
+ dev_info(&pdev->dev, "Coldfire QSPI bus driver\n");
+
+ return 0;
+
+fail6:
+ mcfqspi_cs_teardown(mcfqspi);
+fail5:
+ destroy_workqueue(mcfqspi->workq);
+fail4:
+ clk_disable(mcfqspi->clk);
+ clk_put(mcfqspi->clk);
+fail3:
+ free_irq(mcfqspi->irq, mcfqspi);
+fail2:
+ iounmap(mcfqspi->iobase);
+fail1:
+ release_mem_region(res->start, resource_size(res));
+fail0:
+ spi_master_put(master);
+
+ dev_dbg(&pdev->dev, "Coldfire QSPI probe failed\n");
+
+ return status;
+}
+
+static int __devexit mcfqspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ /* disable the hardware (set the baud rate to 0) */
+ mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
+
+ platform_set_drvdata(pdev, NULL);
+ mcfqspi_cs_teardown(mcfqspi);
+ destroy_workqueue(mcfqspi->workq);
+ clk_disable(mcfqspi->clk);
+ clk_put(mcfqspi->clk);
+ free_irq(mcfqspi->irq, mcfqspi);
+ iounmap(mcfqspi->iobase);
+ release_mem_region(res->start, resource_size(res));
+ spi_unregister_master(master);
+ spi_master_put(master);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int mcfqspi_suspend(struct device *dev)
+{
+ struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
+
+ clk_disable(mcfqspi->clk);
+
+ return 0;
+}
+
+static int mcfqspi_resume(struct device *dev)
+{
+ struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
+
+ clk_enable(mcfqspi->clk);
+
+ return 0;
+}
+
+static struct dev_pm_ops mcfqspi_dev_pm_ops = {
+ .suspend = mcfqspi_suspend,
+ .resume = mcfqspi_resume,
+};
+
+#define MCFQSPI_DEV_PM_OPS (&mcfqspi_dev_pm_ops)
+#else
+#define MCFQSPI_DEV_PM_OPS NULL
+#endif
+
+static struct platform_driver mcfqspi_driver = {
+ .driver.name = DRIVER_NAME,
+ .driver.owner = THIS_MODULE,
+ .driver.pm = MCFQSPI_DEV_PM_OPS,
+ .remove = __devexit_p(mcfqspi_remove),
+};
+
+static int __init mcfqspi_init(void)
+{
+ return platform_driver_probe(&mcfqspi_driver, mcfqspi_probe);
+}
+module_init(mcfqspi_init);
+
+static void __exit mcfqspi_exit(void)
+{
+ platform_driver_unregister(&mcfqspi_driver);
+}
+module_exit(mcfqspi_exit);
+
+MODULE_AUTHOR("Steven King <sfking@fdwdc.com>");
+MODULE_DESCRIPTION("Coldfire QSPI Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
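
Unlike most SPI masters, the coldfire_qspi driver above leaves chip-select handling entirely to board code: mcfqspi_cs_setup(), mcfqspi_cs_select() and mcfqspi_cs_deselect() simply call back into the cs_control operations passed in through platform data. A hypothetical board-support sketch follows; the callback signatures are taken from the call sites in the driver, but the full definitions of struct mcfqspi_cs_control and struct mcfqspi_platform_data live in <asm/mcfqspi.h> (not part of this diff), and the GPIO numbers are invented:

#include <linux/gpio.h>
#include <asm/mcfqspi.h>

#define EXAMPLE_CS0_GPIO	17	/* invented pin numbers */
#define EXAMPLE_CS1_GPIO	18

static int example_cs_setup(struct mcfqspi_cs_control *cs_control)
{
	/* Claim the pins and park them deasserted (active-low case). */
	if (gpio_request(EXAMPLE_CS0_GPIO, "qspi_cs0") ||
	    gpio_request(EXAMPLE_CS1_GPIO, "qspi_cs1"))
		return -EBUSY;
	gpio_direction_output(EXAMPLE_CS0_GPIO, 1);
	gpio_direction_output(EXAMPLE_CS1_GPIO, 1);
	return 0;
}

static void example_cs_select(struct mcfqspi_cs_control *cs_control,
			      u8 chip_select, bool cs_high)
{
	gpio_set_value(chip_select ? EXAMPLE_CS1_GPIO : EXAMPLE_CS0_GPIO,
		       cs_high);
}

static void example_cs_deselect(struct mcfqspi_cs_control *cs_control,
				u8 chip_select, bool cs_high)
{
	gpio_set_value(chip_select ? EXAMPLE_CS1_GPIO : EXAMPLE_CS0_GPIO,
		       !cs_high);
}

static struct mcfqspi_cs_control example_cs_control = {
	.setup		= example_cs_setup,
	.select		= example_cs_select,
	.deselect	= example_cs_deselect,
};

static struct mcfqspi_platform_data example_qspi_pdata = {
	.bus_num	= 0,
	.num_chipselect	= 2,
	.cs_control	= &example_cs_control,
};
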
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c
new file mode 100644
index 000000000000..225ab60b02c4
--- /dev/null
+++ b/drivers/spi/davinci_spi.c
@@ -0,0 +1,1255 @@
+/*
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#include <mach/spi.h>
+#include <mach/edma.h>
+
+#define SPI_NO_RESOURCE ((resource_size_t)-1)
+
+#define SPI_MAX_CHIPSELECT 2
+
+#define CS_DEFAULT 0xFF
+
+#define SPI_BUFSIZ (SMP_CACHE_BYTES + 1)
+#define DAVINCI_DMA_DATA_TYPE_S8 0x01
+#define DAVINCI_DMA_DATA_TYPE_S16 0x02
+#define DAVINCI_DMA_DATA_TYPE_S32 0x04
+
+#define SPIFMT_PHASE_MASK BIT(16)
+#define SPIFMT_POLARITY_MASK BIT(17)
+#define SPIFMT_DISTIMER_MASK BIT(18)
+#define SPIFMT_SHIFTDIR_MASK BIT(20)
+#define SPIFMT_WAITENA_MASK BIT(21)
+#define SPIFMT_PARITYENA_MASK BIT(22)
+#define SPIFMT_ODD_PARITY_MASK BIT(23)
+#define SPIFMT_WDELAY_MASK 0x3f000000u
+#define SPIFMT_WDELAY_SHIFT 24
+#define SPIFMT_CHARLEN_MASK 0x0000001Fu
+
+/* SPIGCR1 */
+#define SPIGCR1_SPIENA_MASK 0x01000000u
+
+/* SPIPC0 */
+#define SPIPC0_DIFUN_MASK BIT(11) /* MISO */
+#define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */
+#define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */
+#define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */
+#define SPIPC0_EN1FUN_MASK BIT(1)
+#define SPIPC0_EN0FUN_MASK BIT(0)
+
+#define SPIINT_MASKALL 0x0101035F
+#define SPI_INTLVL_1 0x000001FFu
+#define SPI_INTLVL_0 0x00000000u
+
+/* SPIDAT1 */
+#define SPIDAT1_CSHOLD_SHIFT 28
+#define SPIDAT1_CSNR_SHIFT 16
+#define SPIGCR1_CLKMOD_MASK BIT(1)
+#define SPIGCR1_MASTER_MASK BIT(0)
+#define SPIGCR1_LOOPBACK_MASK BIT(16)
+
+/* SPIBUF */
+#define SPIBUF_TXFULL_MASK BIT(29)
+#define SPIBUF_RXEMPTY_MASK BIT(31)
+
+/* Error Masks */
+#define SPIFLG_DLEN_ERR_MASK BIT(0)
+#define SPIFLG_TIMEOUT_MASK BIT(1)
+#define SPIFLG_PARERR_MASK BIT(2)
+#define SPIFLG_DESYNC_MASK BIT(3)
+#define SPIFLG_BITERR_MASK BIT(4)
+#define SPIFLG_OVRRUN_MASK BIT(6)
+#define SPIFLG_RX_INTR_MASK BIT(8)
+#define SPIFLG_TX_INTR_MASK BIT(9)
+#define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24)
+#define SPIFLG_MASK (SPIFLG_DLEN_ERR_MASK \
+ | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
+ | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
+ | SPIFLG_OVRRUN_MASK | SPIFLG_RX_INTR_MASK \
+ | SPIFLG_TX_INTR_MASK \
+ | SPIFLG_BUF_INIT_ACTIVE_MASK)
+
+#define SPIINT_DLEN_ERR_INTR BIT(0)
+#define SPIINT_TIMEOUT_INTR BIT(1)
+#define SPIINT_PARERR_INTR BIT(2)
+#define SPIINT_DESYNC_INTR BIT(3)
+#define SPIINT_BITERR_INTR BIT(4)
+#define SPIINT_OVRRUN_INTR BIT(6)
+#define SPIINT_RX_INTR BIT(8)
+#define SPIINT_TX_INTR BIT(9)
+#define SPIINT_DMA_REQ_EN BIT(16)
+#define SPIINT_ENABLE_HIGHZ BIT(24)
+
+#define SPI_T2CDELAY_SHIFT 16
+#define SPI_C2TDELAY_SHIFT 24
+
+/* SPI Controller registers */
+#define SPIGCR0 0x00
+#define SPIGCR1 0x04
+#define SPIINT 0x08
+#define SPILVL 0x0c
+#define SPIFLG 0x10
+#define SPIPC0 0x14
+#define SPIPC1 0x18
+#define SPIPC2 0x1c
+#define SPIPC3 0x20
+#define SPIPC4 0x24
+#define SPIPC5 0x28
+#define SPIPC6 0x2c
+#define SPIPC7 0x30
+#define SPIPC8 0x34
+#define SPIDAT0 0x38
+#define SPIDAT1 0x3c
+#define SPIBUF 0x40
+#define SPIEMU 0x44
+#define SPIDELAY 0x48
+#define SPIDEF 0x4c
+#define SPIFMT0 0x50
+#define SPIFMT1 0x54
+#define SPIFMT2 0x58
+#define SPIFMT3 0x5c
+#define TGINTVEC0 0x60
+#define TGINTVEC1 0x64
+
+struct davinci_spi_slave {
+ u32 cmd_to_write;
+ u32 clk_ctrl_to_write;
+ u32 bytes_per_word;
+ u8 active_cs;
+};
+
+/* We have 2 DMA channels per CS, one for RX and one for TX */
+struct davinci_spi_dma {
+ int dma_tx_channel;
+ int dma_rx_channel;
+ int dma_tx_sync_dev;
+ int dma_rx_sync_dev;
+ enum dma_event_q eventq;
+
+ struct completion dma_tx_completion;
+ struct completion dma_rx_completion;
+};
+
+/* SPI Controller driver's private data. */
+struct davinci_spi {
+ struct spi_bitbang bitbang;
+ struct clk *clk;
+
+ u8 version;
+ resource_size_t pbase;
+ void __iomem *base;
+ size_t region_size;
+ u32 irq;
+ struct completion done;
+
+ const void *tx;
+ void *rx;
+ u8 *tmp_buf;
+ int count;
+ struct davinci_spi_dma *dma_channels;
+ struct davinci_spi_platform_data *pdata;
+
+ void (*get_rx)(u32 rx_data, struct davinci_spi *);
+ u32 (*get_tx)(struct davinci_spi *);
+
+ struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT];
+};
+
+static unsigned use_dma;
+
+static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi)
+{
+ u8 *rx = davinci_spi->rx;
+
+ *rx++ = (u8)data;
+ davinci_spi->rx = rx;
+}
+
+static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi)
+{
+ u16 *rx = davinci_spi->rx;
+
+ *rx++ = (u16)data;
+ davinci_spi->rx = rx;
+}
+
+static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi)
+{
+ u32 data;
+ const u8 *tx = davinci_spi->tx;
+
+ data = *tx++;
+ davinci_spi->tx = tx;
+ return data;
+}
+
+static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi)
+{
+ u32 data;
+ const u16 *tx = davinci_spi->tx;
+
+ data = *tx++;
+ davinci_spi->tx = tx;
+ return data;
+}
+
+static inline void set_io_bits(void __iomem *addr, u32 bits)
+{
+ u32 v = ioread32(addr);
+
+ v |= bits;
+ iowrite32(v, addr);
+}
+
+static inline void clear_io_bits(void __iomem *addr, u32 bits)
+{
+ u32 v = ioread32(addr);
+
+ v &= ~bits;
+ iowrite32(v, addr);
+}
+
+static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
+{
+ set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
+}
+
+static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
+{
+ clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
+}
+
+static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable)
+{
+ struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
+
+ if (enable)
+ set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
+ else
+ clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
+}
+
+/*
+ * Interface to control the chip select signal
+ */
+static void davinci_spi_chipselect(struct spi_device *spi, int value)
+{
+ struct davinci_spi *davinci_spi;
+ struct davinci_spi_platform_data *pdata;
+ u32 data1_reg_val = 0;
+
+ davinci_spi = spi_master_get_devdata(spi->master);
+ pdata = davinci_spi->pdata;
+
+ /*
+ * Board specific chip select logic decides the polarity and cs
+ * line for the controller
+ */
+ if (value == BITBANG_CS_INACTIVE) {
+ set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT);
+
+ data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT;
+ iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
+
+ while ((ioread32(davinci_spi->base + SPIBUF)
+ & SPIBUF_RXEMPTY_MASK) == 0)
+ cpu_relax();
+ }
+}
+
+/**
+ * davinci_spi_setup_transfer - This function determines the transfer method
+ * @spi: spi device on which data transfer to be done
+ * @t: spi transfer in which transfer info is filled
+ *
+ * This function determines data transfer method (8/16/32 bit transfer).
+ * It will also set the SPI Clock Control register according to
+ * SPI slave device freq.
+ */
+static int davinci_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+
+ struct davinci_spi *davinci_spi;
+ struct davinci_spi_platform_data *pdata;
+ u8 bits_per_word = 0;
+ u32 hz = 0, prescale;
+
+ davinci_spi = spi_master_get_devdata(spi->master);
+ pdata = davinci_spi->pdata;
+
+ if (t) {
+ bits_per_word = t->bits_per_word;
+ hz = t->speed_hz;
+ }
+
+ /* if bits_per_word is not set then set it to the default */
+ if (!bits_per_word)
+ bits_per_word = spi->bits_per_word;
+
+ /*
+ * Assign function pointer to appropriate transfer method
+ * 8bit, 16bit or 32bit transfer
+ */
+ if (bits_per_word <= 8 && bits_per_word >= 2) {
+ davinci_spi->get_rx = davinci_spi_rx_buf_u8;
+ davinci_spi->get_tx = davinci_spi_tx_buf_u8;
+ davinci_spi->slave[spi->chip_select].bytes_per_word = 1;
+ } else if (bits_per_word <= 16 && bits_per_word >= 2) {
+ davinci_spi->get_rx = davinci_spi_rx_buf_u16;
+ davinci_spi->get_tx = davinci_spi_tx_buf_u16;
+ davinci_spi->slave[spi->chip_select].bytes_per_word = 2;
+ } else
+ return -EINVAL;
+
+ if (!hz)
+ hz = spi->max_speed_hz;
+
+ clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK,
+ spi->chip_select);
+ set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f,
+ spi->chip_select);
+
+ prescale = ((clk_get_rate(davinci_spi->clk) / hz) - 1) & 0xff;
+
+ clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select);
+ set_fmt_bits(davinci_spi->base, prescale << 8, spi->chip_select);
+
+ return 0;
+}
+
+static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
+{
+ struct spi_device *spi = (struct spi_device *)data;
+ struct davinci_spi *davinci_spi;
+ struct davinci_spi_dma *davinci_spi_dma;
+ struct davinci_spi_platform_data *pdata;
+
+ davinci_spi = spi_master_get_devdata(spi->master);
+ davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
+ pdata = davinci_spi->pdata;
+
+ if (ch_status == DMA_COMPLETE)
+ edma_stop(davinci_spi_dma->dma_rx_channel);
+ else
+ edma_clean_channel(davinci_spi_dma->dma_rx_channel);
+
+ complete(&davinci_spi_dma->dma_rx_completion);
+ /* We must disable the DMA RX request */
+ davinci_spi_set_dma_req(spi, 0);
+}
+
+static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
+{
+ struct spi_device *spi = (struct spi_device *)data;
+ struct davinci_spi *davinci_spi;
+ struct davinci_spi_dma *davinci_spi_dma;
+ struct davinci_spi_platform_data *pdata;
+
+ davinci_spi = spi_master_get_devdata(spi->master);
+ davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
+ pdata = davinci_spi->pdata;
+
+ if (ch_status == DMA_COMPLETE)
+ edma_stop(davinci_spi_dma->dma_tx_channel);
+ else
+ edma_clean_channel(davinci_spi_dma->dma_tx_channel);
+
+ complete(&davinci_spi_dma->dma_tx_completion);
+ /* We must disable the DMA TX request */
+ davinci_spi_set_dma_req(spi, 0);
+}
+
+static int davinci_spi_request_dma(struct spi_device *spi)
+{
+ struct davinci_spi *davinci_spi;
+ struct davinci_spi_dma *davinci_spi_dma;
+ struct davinci_spi_platform_data *pdata;
+ struct device *sdev;
+ int r;
+
+ davinci_spi = spi_master_get_devdata(spi->master);
+ davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
+ pdata = davinci_spi->pdata;
+ sdev = davinci_spi->bitbang.master->dev.parent;
+
+ r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev,
+ davinci_spi_dma_rx_callback, spi,
+ davinci_spi_dma->eventq);
+ if (r < 0) {
+ dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n");
+ return -EAGAIN;
+ }
+ davinci_spi_dma->dma_rx_channel = r;
+ r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev,
+ davinci_spi_dma_tx_callback, spi,
+ davinci_spi_dma->eventq);
+ if (r < 0) {
+ edma_free_channel(davinci_spi_dma->dma_rx_channel);
+ davinci_spi_dma->dma_rx_channel = -1;
+ dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n");
+ return -EAGAIN;
+ }
+ davinci_spi_dma->dma_tx_channel = r;
+
+ return 0;
+}
+
+/**
+ * davinci_spi_setup - This function sets the default transfer method
+ * @spi: spi device on which data transfer to be done
+ *
+ * This function sets the default transfer method.
+ */
+
+static int davinci_spi_setup(struct spi_device *spi)
+{
+ int retval;
+ struct davinci_spi *davinci_spi;
+ struct davinci_spi_dma *davinci_spi_dma;
+ struct device *sdev;
+
+ davinci_spi = spi_master_get_devdata(spi->master);
+ sdev = davinci_spi->bitbang.master->dev.parent;
+
+ /* if bits per word length is zero then set it to the default of 8 */
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+
+ davinci_spi->slave[spi->chip_select].cmd_to_write = 0;
+
+ if (use_dma && davinci_spi->dma_channels) {
+ davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
+
+ if ((davinci_spi_dma->dma_rx_channel == -1)
+ || (davinci_spi_dma->dma_tx_channel == -1)) {
+ retval = davinci_spi_request_dma(spi);
+ if (retval < 0)
+ return retval;
+ }
+ }
+
+ /*
+ * SPI in DaVinci and DA8xx operates between
+ * 600 kHz and 50 MHz
+ */
+ if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) {
+ dev_dbg(sdev, "Operating frequency is not in acceptable "
+ "range\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Set up SPIFMTn register, unique to this chipselect.
+ *
+ * NOTE: we could do all of these with one write. Also, some
+ * of the "version 2" features are found in chips that don't
+ * support all of them...
+ */
+ if (spi->mode & SPI_LSB_FIRST)
+ set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
+ spi->chip_select);
+ else
+ clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
+ spi->chip_select);
+
+ if (spi->mode & SPI_CPOL)
+ set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
+ spi->chip_select);
+ else
+ clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
+ spi->chip_select);
+
+ if (!(spi->mode & SPI_CPHA))
+ set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
+ spi->chip_select);
+ else
+ clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
+ spi->chip_select);
+
+ /*
+ * Version 1 hardware supports two basic SPI modes:
+ * - Standard SPI mode uses 4 pins, with chipselect
+ * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
+ * (distinct from SPI_3WIRE, with just one data wire;
+ * or similar variants without MOSI or without MISO)
+ *
+ * Version 2 hardware supports an optional handshaking signal,
+ * so it can support two more modes:
+ * - 5 pin SPI variant is standard SPI plus SPI_READY
+ * - 4 pin with enable is (SPI_READY | SPI_NO_CS)
+ */
+
+ if (davinci_spi->version == SPI_VERSION_2) {
+ clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK,
+ spi->chip_select);
+ set_fmt_bits(davinci_spi->base,
+ (davinci_spi->pdata->wdelay
+ << SPIFMT_WDELAY_SHIFT)
+ & SPIFMT_WDELAY_MASK,
+ spi->chip_select);
+
+ if (davinci_spi->pdata->odd_parity)
+ set_fmt_bits(davinci_spi->base,
+ SPIFMT_ODD_PARITY_MASK,
+ spi->chip_select);
+ else
+ clear_fmt_bits(davinci_spi->base,
+ SPIFMT_ODD_PARITY_MASK,
+ spi->chip_select);
+
+ if (davinci_spi->pdata->parity_enable)
+ set_fmt_bits(davinci_spi->base,
+ SPIFMT_PARITYENA_MASK,
+ spi->chip_select);
+ else
+ clear_fmt_bits(davinci_spi->base,
+ SPIFMT_PARITYENA_MASK,
+ spi->chip_select);
+
+ if (davinci_spi->pdata->wait_enable)
+ set_fmt_bits(davinci_spi->base,
+ SPIFMT_WAITENA_MASK,
+ spi->chip_select);
+ else
+ clear_fmt_bits(davinci_spi->base,
+ SPIFMT_WAITENA_MASK,
+ spi->chip_select);
+
+ if (davinci_spi->pdata->timer_disable)
+ set_fmt_bits(davinci_spi->base,
+ SPIFMT_DISTIMER_MASK,
+ spi->chip_select);
+ else
+ clear_fmt_bits(davinci_spi->base,
+ SPIFMT_DISTIMER_MASK,
+ spi->chip_select);
+ }
+
+ retval = davinci_spi_setup_transfer(spi, NULL);
+
+ return retval;
+}
+
+static void davinci_spi_cleanup(struct spi_device *spi)
+{
+ struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
+ struct davinci_spi_dma *davinci_spi_dma;
+
+ davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
+
+ if (use_dma && davinci_spi->dma_channels) {
+ davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
+
+ if ((davinci_spi_dma->dma_rx_channel != -1)
+ && (davinci_spi_dma->dma_tx_channel != -1)) {
+ edma_free_channel(davinci_spi_dma->dma_tx_channel);
+ edma_free_channel(davinci_spi_dma->dma_rx_channel);
+ }
+ }
+}
+
+static int davinci_spi_bufs_prep(struct spi_device *spi,
+ struct davinci_spi *davinci_spi)
+{
+ int op_mode = 0;
+
+ /*
+ * REVISIT unless devices disagree about SPI_LOOP or
+ * SPI_READY (SPI_NO_CS only allows one device!), this
+ * should not need to be done before each message...
+ * optimize for both flags staying cleared.
+ */
+
+ op_mode = SPIPC0_DIFUN_MASK
+ | SPIPC0_DOFUN_MASK
+ | SPIPC0_CLKFUN_MASK;
+ if (!(spi->mode & SPI_NO_CS))
+ op_mode |= 1 << spi->chip_select;
+ if (spi->mode & SPI_READY)
+ op_mode |= SPIPC0_SPIENA_MASK;
+
+ iowrite32(op_mode, davinci_spi->base + SPIPC0);
+
+ if (spi->mode & SPI_LOOP)
+ set_io_bits(davinci_spi->base + SPIGCR1,
+ SPIGCR1_LOOPBACK_MASK);
+ else
+ clear_io_bits(davinci_spi->base + SPIGCR1,
+ SPIGCR1_LOOPBACK_MASK);
+
+ return 0;
+}
+
+static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
+ int int_status)
+{
+ struct device *sdev = davinci_spi->bitbang.master->dev.parent;
+
+ if (int_status & SPIFLG_TIMEOUT_MASK) {
+ dev_dbg(sdev, "SPI Time-out Error\n");
+ return -ETIMEDOUT;
+ }
+ if (int_status & SPIFLG_DESYNC_MASK) {
+ dev_dbg(sdev, "SPI Desynchronization Error\n");
+ return -EIO;
+ }
+ if (int_status & SPIFLG_BITERR_MASK) {
+ dev_dbg(sdev, "SPI Bit error\n");
+ return -EIO;
+ }
+
+ if (davinci_spi->version == SPI_VERSION_2) {
+ if (int_status & SPIFLG_DLEN_ERR_MASK) {
+ dev_dbg(sdev, "SPI Data Length Error\n");
+ return -EIO;
+ }
+ if (int_status & SPIFLG_PARERR_MASK) {
+ dev_dbg(sdev, "SPI Parity Error\n");
+ return -EIO;
+ }
+ if (int_status & SPIFLG_OVRRUN_MASK) {
+ dev_dbg(sdev, "SPI Data Overrun error\n");
+ return -EIO;
+ }
+ if (int_status & SPIFLG_TX_INTR_MASK) {
+ dev_dbg(sdev, "SPI TX intr bit set\n");
+ return -EIO;
+ }
+ if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
+ dev_dbg(sdev, "SPI Buffer Init Active\n");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * davinci_spi_bufs - function which handles the data transfer
+ * @spi: spi device on which data transfer to be done
+ * @t: spi transfer in which transfer info is filled
+ *
+ * This function puts the data to be transferred into the data register
+ * of the SPI controller and then waits until completion is signalled
+ * by the IRQ handler.
+ */
+static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct davinci_spi *davinci_spi;
+ int int_status, count, ret;
+ u8 conv, tmp;
+ u32 tx_data, data1_reg_val;
+ u32 buf_val, flg_val;
+ struct davinci_spi_platform_data *pdata;
+
+ davinci_spi = spi_master_get_devdata(spi->master);
+ pdata = davinci_spi->pdata;
+
+ davinci_spi->tx = t->tx_buf;
+ davinci_spi->rx = t->rx_buf;
+
+ /* convert len to words based on bits_per_word */
+ conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
+ davinci_spi->count = t->len / conv;
+
+ INIT_COMPLETION(davinci_spi->done);
+
+ ret = davinci_spi_bufs_prep(spi, davinci_spi);
+ if (ret)
+ return ret;
+
+ /* Enable SPI */
+ set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
+
+ iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
+ (pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
+ davinci_spi->base + SPIDELAY);
+
+ count = davinci_spi->count;
+ data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
+ tmp = ~(0x1 << spi->chip_select);
+
+ clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);
+
+ data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;
+
+ while ((ioread32(davinci_spi->base + SPIBUF)
+ & SPIBUF_RXEMPTY_MASK) == 0)
+ cpu_relax();
+
+ /* Determine the command to execute READ or WRITE */
+ if (t->tx_buf) {
+ clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
+
+ while (1) {
+ tx_data = davinci_spi->get_tx(davinci_spi);
+
+ data1_reg_val &= ~(0xFFFF);
+ data1_reg_val |= (0xFFFF & tx_data);
+
+ buf_val = ioread32(davinci_spi->base + SPIBUF);
+ if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
+ iowrite32(data1_reg_val,
+ davinci_spi->base + SPIDAT1);
+
+ count--;
+ }
+ while (ioread32(davinci_spi->base + SPIBUF)
+ & SPIBUF_RXEMPTY_MASK)
+ cpu_relax();
+
+ /* getting the returned byte */
+ if (t->rx_buf) {
+ buf_val = ioread32(davinci_spi->base + SPIBUF);
+ davinci_spi->get_rx(buf_val, davinci_spi);
+ }
+ if (count <= 0)
+ break;
+ }
+ } else {
+ if (pdata->poll_mode) {
+ while (1) {
+ /* keeps the serial clock going */
+ if ((ioread32(davinci_spi->base + SPIBUF)
+ & SPIBUF_TXFULL_MASK) == 0)
+ iowrite32(data1_reg_val,
+ davinci_spi->base + SPIDAT1);
+
+ while (ioread32(davinci_spi->base + SPIBUF) &
+ SPIBUF_RXEMPTY_MASK)
+ cpu_relax();
+
+ flg_val = ioread32(davinci_spi->base + SPIFLG);
+ buf_val = ioread32(davinci_spi->base + SPIBUF);
+
+ davinci_spi->get_rx(buf_val, davinci_spi);
+
+ count--;
+ if (count <= 0)
+ break;
+ }
+ } else { /* Receive in Interrupt mode */
+ int i;
+
+ for (i = 0; i < davinci_spi->count; i++) {
+ set_io_bits(davinci_spi->base + SPIINT,
+ SPIINT_BITERR_INTR
+ | SPIINT_OVRRUN_INTR
+ | SPIINT_RX_INTR);
+
+ iowrite32(data1_reg_val,
+ davinci_spi->base + SPIDAT1);
+
+ while (ioread32(davinci_spi->base + SPIINT) &
+ SPIINT_RX_INTR)
+ cpu_relax();
+ }
+ iowrite32((data1_reg_val & 0x0ffcffff),
+ davinci_spi->base + SPIDAT1);
+ }
+ }
+
+ /*
+ * Check for bit error, desync error, parity error, timeout error and
+ * receive overflow errors
+ */
+ int_status = ioread32(davinci_spi->base + SPIFLG);
+
+ ret = davinci_spi_check_error(davinci_spi, int_status);
+ if (ret != 0)
+ return ret;
+
+ /* SPI Framework maintains the count only in bytes so convert back */
+ davinci_spi->count *= conv;
+
+ return t->len;
+}
+
+#define DAVINCI_DMA_DATA_TYPE_S8 0x01
+#define DAVINCI_DMA_DATA_TYPE_S16 0x02
+#define DAVINCI_DMA_DATA_TYPE_S32 0x04
+
+static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct davinci_spi *davinci_spi;
+ int int_status = 0;
+ int count, temp_count;
+ u8 conv = 1;
+ u8 tmp;
+ u32 data1_reg_val;
+ struct davinci_spi_dma *davinci_spi_dma;
+ int word_len, data_type, ret;
+ unsigned long tx_reg, rx_reg;
+ struct davinci_spi_platform_data *pdata;
+ struct device *sdev;
+
+ davinci_spi = spi_master_get_devdata(spi->master);
+ pdata = davinci_spi->pdata;
+ sdev = davinci_spi->bitbang.master->dev.parent;
+
+ davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
+
+ tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
+ rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;
+
+ davinci_spi->tx = t->tx_buf;
+ davinci_spi->rx = t->rx_buf;
+
+ /* convert len to words based on bits_per_word */
+ conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
+ davinci_spi->count = t->len / conv;
+
+ INIT_COMPLETION(davinci_spi->done);
+
+ init_completion(&davinci_spi_dma->dma_rx_completion);
+ init_completion(&davinci_spi_dma->dma_tx_completion);
+
+ word_len = conv * 8;
+
+ if (word_len <= 8)
+ data_type = DAVINCI_DMA_DATA_TYPE_S8;
+ else if (word_len <= 16)
+ data_type = DAVINCI_DMA_DATA_TYPE_S16;
+ else if (word_len <= 32)
+ data_type = DAVINCI_DMA_DATA_TYPE_S32;
+ else
+ return -EINVAL;
+
+ ret = davinci_spi_bufs_prep(spi, davinci_spi);
+ if (ret)
+ return ret;
+
+ /* Put delay val if required */
+ iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
+ (pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
+ davinci_spi->base + SPIDELAY);
+
+ count = davinci_spi->count; /* the number of elements */
+ data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
+
+ /* CS default = 0xFF */
+ tmp = ~(0x1 << spi->chip_select);
+
+ clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);
+
+ data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;
+
+ /* disable all interrupts for dma transfers */
+ clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
+ /* Disable SPI to write configuration bits in SPIDAT */
+ clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
+ iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
+ /* Enable SPI */
+ set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
+
+ while ((ioread32(davinci_spi->base + SPIBUF)
+ & SPIBUF_RXEMPTY_MASK) == 0)
+ cpu_relax();
+
+
+ if (t->tx_buf) {
+ t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&spi->dev, t->tx_dma)) {
+ dev_dbg(sdev, "Unable to DMA map a %d bytes"
+ " TX buffer\n", count);
+ return -ENOMEM;
+ }
+ temp_count = count;
+ } else {
+ /* We need TX clocking for RX transaction */
+ t->tx_dma = dma_map_single(&spi->dev,
+ (void *)davinci_spi->tmp_buf, count + 1,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&spi->dev, t->tx_dma)) {
+ dev_dbg(sdev, "Unable to DMA map a %d bytes"
+ " TX tmp buffer\n", count);
+ return -ENOMEM;
+ }
+ temp_count = count + 1;
+ }
+
+ edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
+ data_type, temp_count, 1, 0, ASYNC);
+ edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
+ edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
+ edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
+ edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);
+
+ if (t->rx_buf) {
+ /* initiate transaction */
+ iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
+
+ t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&spi->dev, t->rx_dma)) {
+ dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
+ count);
+ if (t->tx_buf != NULL)
+ dma_unmap_single(NULL, t->tx_dma,
+ count, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+ edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
+ data_type, count, 1, 0, ASYNC);
+ edma_set_src(davinci_spi_dma->dma_rx_channel,
+ rx_reg, INCR, W8BIT);
+ edma_set_dest(davinci_spi_dma->dma_rx_channel,
+ t->rx_dma, INCR, W8BIT);
+ edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
+ edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
+ data_type, 0);
+ }
+
+ if ((t->tx_buf) || (t->rx_buf))
+ edma_start(davinci_spi_dma->dma_tx_channel);
+
+ if (t->rx_buf)
+ edma_start(davinci_spi_dma->dma_rx_channel);
+
+ if ((t->rx_buf) || (t->tx_buf))
+ davinci_spi_set_dma_req(spi, 1);
+
+ if (t->tx_buf)
+ wait_for_completion_interruptible(
+ &davinci_spi_dma->dma_tx_completion);
+
+ if (t->rx_buf)
+ wait_for_completion_interruptible(
+ &davinci_spi_dma->dma_rx_completion);
+
+ dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE);
+
+ if (t->rx_buf)
+ dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE);
+
+ /*
+ * Check for bit error, desync error, parity error, timeout error and
+ * receive overflow errors
+ */
+ int_status = ioread32(davinci_spi->base + SPIFLG);
+
+ ret = davinci_spi_check_error(davinci_spi, int_status);
+ if (ret != 0)
+ return ret;
+
+ /* SPI Framework maintains the count only in bytes so convert back */
+ davinci_spi->count *= conv;
+
+ return t->len;
+}
+
+/**
+ * davinci_spi_irq - IRQ handler for DaVinci SPI
+ * @irq: IRQ number for this SPI Master
+ * @context_data: structure for SPI Master controller davinci_spi
+ */
+static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
+{
+ struct davinci_spi *davinci_spi = context_data;
+ u32 int_status, rx_data = 0;
+ irqreturn_t ret = IRQ_NONE;
+
+ int_status = ioread32(davinci_spi->base + SPIFLG);
+
+ while ((int_status & SPIFLG_RX_INTR_MASK)) {
+ if (likely(int_status & SPIFLG_RX_INTR_MASK)) {
+ ret = IRQ_HANDLED;
+
+ rx_data = ioread32(davinci_spi->base + SPIBUF);
+ davinci_spi->get_rx(rx_data, davinci_spi);
+
+ /* Disable Receive Interrupt */
+ iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR),
+ davinci_spi->base + SPIINT);
+ } else
+ (void)davinci_spi_check_error(davinci_spi, int_status);
+
+ int_status = ioread32(davinci_spi->base + SPIFLG);
+ }
+
+ return ret;
+}
+
+/**
+ * davinci_spi_probe - probe function for SPI Master Controller
+ * @pdev: platform_device structure which contains platform specific data
+ */
+static int davinci_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct davinci_spi *davinci_spi;
+ struct davinci_spi_platform_data *pdata;
+ struct resource *r, *mem;
+ resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
+ resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
+ resource_size_t dma_eventq = SPI_NO_RESOURCE;
+ int i = 0, ret = 0;
+
+ pdata = pdev->dev.platform_data;
+ if (pdata == NULL) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
+ if (master == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev_set_drvdata(&pdev->dev, master);
+
+ davinci_spi = spi_master_get_devdata(master);
+ if (davinci_spi == NULL) {
+ ret = -ENOENT;
+ goto free_master;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENOENT;
+ goto free_master;
+ }
+
+ davinci_spi->pbase = r->start;
+ davinci_spi->region_size = resource_size(r);
+ davinci_spi->pdata = pdata;
+
+ mem = request_mem_region(r->start, davinci_spi->region_size,
+ pdev->name);
+ if (mem == NULL) {
+ ret = -EBUSY;
+ goto free_master;
+ }
+
+ davinci_spi->base = (struct davinci_spi_reg __iomem *)
+ ioremap(r->start, davinci_spi->region_size);
+ if (davinci_spi->base == NULL) {
+ ret = -ENOMEM;
+ goto release_region;
+ }
+
+ davinci_spi->irq = platform_get_irq(pdev, 0);
+ if (davinci_spi->irq <= 0) {
+ ret = -EINVAL;
+ goto unmap_io;
+ }
+
+ ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED,
+ dev_name(&pdev->dev), davinci_spi);
+ if (ret)
+ goto unmap_io;
+
+ /* Allocate tmp_buf for tx_buf */
+ davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL);
+ if (davinci_spi->tmp_buf == NULL) {
+ ret = -ENOMEM;
+ goto irq_free;
+ }
+
+ davinci_spi->bitbang.master = spi_master_get(master);
+ if (davinci_spi->bitbang.master == NULL) {
+ ret = -ENODEV;
+ goto free_tmp_buf;
+ }
+
+ davinci_spi->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(davinci_spi->clk)) {
+ ret = -ENODEV;
+ goto put_master;
+ }
+ clk_enable(davinci_spi->clk);
+
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = pdata->num_chipselect;
+ master->setup = davinci_spi_setup;
+ master->cleanup = davinci_spi_cleanup;
+
+ davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
+ davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;
+
+ davinci_spi->version = pdata->version;
+ use_dma = pdata->use_dma;
+
+ davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
+ if (davinci_spi->version == SPI_VERSION_2)
+ davinci_spi->bitbang.flags |= SPI_READY;
+
+ if (use_dma) {
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (r)
+ dma_rx_chan = r->start;
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (r)
+ dma_tx_chan = r->start;
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
+ if (r)
+ dma_eventq = r->start;
+ }
+
+ if (!use_dma ||
+ dma_rx_chan == SPI_NO_RESOURCE ||
+ dma_tx_chan == SPI_NO_RESOURCE ||
+ dma_eventq == SPI_NO_RESOURCE) {
+ davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
+ use_dma = 0;
+ } else {
+ davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
+ davinci_spi->dma_channels = kzalloc(master->num_chipselect
+ * sizeof(struct davinci_spi_dma), GFP_KERNEL);
+ if (davinci_spi->dma_channels == NULL) {
+ ret = -ENOMEM;
+ goto free_clk;
+ }
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ davinci_spi->dma_channels[i].dma_rx_channel = -1;
+ davinci_spi->dma_channels[i].dma_rx_sync_dev =
+ dma_rx_chan;
+ davinci_spi->dma_channels[i].dma_tx_channel = -1;
+ davinci_spi->dma_channels[i].dma_tx_sync_dev =
+ dma_tx_chan;
+ davinci_spi->dma_channels[i].eventq = dma_eventq;
+ }
+ dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
+ "Using RX channel = %d , TX channel = %d and "
+ "event queue = %d", dma_rx_chan, dma_tx_chan,
+ dma_eventq);
+ }
+
+ davinci_spi->get_rx = davinci_spi_rx_buf_u8;
+ davinci_spi->get_tx = davinci_spi_tx_buf_u8;
+
+ init_completion(&davinci_spi->done);
+
+ /* Reset In/OUT SPI module */
+ iowrite32(0, davinci_spi->base + SPIGCR0);
+ udelay(100);
+ iowrite32(1, davinci_spi->base + SPIGCR0);
+
+ /* Clock internal */
+ if (davinci_spi->pdata->clk_internal)
+ set_io_bits(davinci_spi->base + SPIGCR1,
+ SPIGCR1_CLKMOD_MASK);
+ else
+ clear_io_bits(davinci_spi->base + SPIGCR1,
+ SPIGCR1_CLKMOD_MASK);
+
+ /* master mode default */
+ set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
+
+ if (davinci_spi->pdata->intr_level)
+ iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
+ else
+ iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);
+
+ ret = spi_bitbang_start(&davinci_spi->bitbang);
+ if (ret)
+ goto free_clk;
+
+ dev_info(&pdev->dev, "Controller at 0x%p \n", davinci_spi->base);
+
+ if (!pdata->poll_mode)
+ dev_info(&pdev->dev, "Operating in interrupt mode"
+ " using IRQ %d\n", davinci_spi->irq);
+
+ return ret;
+
+free_clk:
+ clk_disable(davinci_spi->clk);
+ clk_put(davinci_spi->clk);
+put_master:
+ spi_master_put(master);
+free_tmp_buf:
+ kfree(davinci_spi->tmp_buf);
+irq_free:
+ free_irq(davinci_spi->irq, davinci_spi);
+unmap_io:
+ iounmap(davinci_spi->base);
+release_region:
+ release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
+free_master:
+ kfree(master);
+err:
+ return ret;
+}
+
+/**
+ * davinci_spi_remove - remove function for SPI Master Controller
+ * @pdev: platform_device structure which contains platform specific data
+ *
+ * This function does the reverse of davinci_spi_probe():
+ * it frees the IRQ and the SPI controller's memory region.
+ * It will also call spi_bitbang_stop to destroy the work queue which was
+ * created by spi_bitbang_start.
+ */
+static int __exit davinci_spi_remove(struct platform_device *pdev)
+{
+ struct davinci_spi *davinci_spi;
+ struct spi_master *master;
+
+ master = dev_get_drvdata(&pdev->dev);
+ davinci_spi = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&davinci_spi->bitbang);
+
+ clk_disable(davinci_spi->clk);
+ clk_put(davinci_spi->clk);
+ spi_master_put(master);
+ kfree(davinci_spi->tmp_buf);
+ free_irq(davinci_spi->irq, davinci_spi);
+ iounmap(davinci_spi->base);
+ release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
+
+ return 0;
+}
+
+static struct platform_driver davinci_spi_driver = {
+ .driver.name = "spi_davinci",
+ .remove = __exit_p(davinci_spi_remove),
+};
+
+static int __init davinci_spi_init(void)
+{
+ return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe);
+}
+module_init(davinci_spi_init);
+
+static void __exit davinci_spi_exit(void)
+{
+ platform_driver_unregister(&davinci_spi_driver);
+}
+module_exit(davinci_spi_exit);
+
+MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
+MODULE_LICENSE("GPL");
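
The bus-clock setup in davinci_spi_setup_transfer() above reduces to a single prescaler byte written into bits 15:8 of the per-chipselect SPIFMTn register. A small worked sketch of that arithmetic follows; the 150 MHz module clock is an assumed example value (the driver itself takes it from clk_get_rate()), and the rate = input clock / (PRESCALE + 1) relationship is the one implied by the driver's own formula:

#include <linux/types.h>

/* Worked example of the SPIFMTn prescale computation from
 * davinci_spi_setup_transfer(); the 150 MHz module clock below is an
 * assumption for illustration, not a value taken from this patch. */
static u32 example_davinci_prescale(unsigned long module_clk, u32 hz)
{
	/* Same arithmetic as the driver: PRESCALE = clk / hz - 1, kept to 8 bits. */
	return ((module_clk / hz) - 1) & 0xff;
}

/*
 * example_davinci_prescale(150000000, 10000000) == 14, giving an effective
 * bus rate of 150 MHz / (14 + 1) = 10 MHz; the driver writes that value into
 * bits 15:8 of the chip select's SPIFMTn register with set_fmt_bits().
 */
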
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index 31620fae77be..8ed38f1d6c18 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -152,6 +152,7 @@ static void mrst_spi_debugfs_remove(struct dw_spi *dws)
#else
static inline int mrst_spi_debugfs_init(struct dw_spi *dws)
{
+ return 0;
}
static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
@@ -161,14 +162,14 @@ static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
static void wait_till_not_busy(struct dw_spi *dws)
{
- unsigned long end = jiffies + usecs_to_jiffies(1000);
+ unsigned long end = jiffies + 1 + usecs_to_jiffies(1000);
while (time_before(jiffies, end)) {
if (!(dw_readw(dws, sr) & SR_BUSY))
return;
}
dev_err(&dws->master->dev,
- "DW SPI: Stutus keeps busy for 1000us after a read/write!\n");
+ "DW SPI: Status keeps busy for 1000us after a read/write!\n");
}
static void flush(struct dw_spi *dws)
@@ -358,6 +359,8 @@ static void transfer_complete(struct dw_spi *dws)
static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
u16 irq_status, irq_mask = 0x3f;
+ u32 int_level = dws->fifo_len / 2;
+ u32 left;
irq_status = dw_readw(dws, isr) & irq_mask;
/* Error handling */
@@ -369,22 +372,23 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws)
return IRQ_HANDLED;
}
- /* INT comes from tx */
- if (dws->tx && (irq_status & SPI_INT_TXEI)) {
- while (dws->tx < dws->tx_end)
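+	/* TXEI: refill at most half the FIFO, drain whatever sits in the
+	 * RX FIFO, and only re-arm the interrupt if data is still left */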
+ if (irq_status & SPI_INT_TXEI) {
+ spi_mask_intr(dws, SPI_INT_TXEI);
+
+ left = (dws->tx_end - dws->tx) / dws->n_bytes;
+ left = (left > int_level) ? int_level : left;
+
+ while (left--)
dws->write(dws);
+ dws->read(dws);
- if (dws->tx == dws->tx_end) {
- spi_mask_intr(dws, SPI_INT_TXEI);
+ /* Re-enable the IRQ if there is still data left to tx */
+ if (dws->tx_end > dws->tx)
+ spi_umask_intr(dws, SPI_INT_TXEI);
+ else
transfer_complete(dws);
- }
}
- /* INT comes from rx */
- if (dws->rx && (irq_status & SPI_INT_RXFI)) {
- if (dws->read(dws))
- transfer_complete(dws);
- }
return IRQ_HANDLED;
}
@@ -404,12 +408,9 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id)
/* Must be called inside pump_transfers() */
static void poll_transfer(struct dw_spi *dws)
{
- if (dws->tx) {
- while (dws->write(dws))
- dws->read(dws);
- }
+ while (dws->write(dws))
+ dws->read(dws);
- dws->read(dws);
transfer_complete(dws);
}
@@ -428,6 +429,7 @@ static void pump_transfers(unsigned long data)
u8 bits = 0;
u8 imask = 0;
u8 cs_change = 0;
+ u16 txint_level = 0;
u16 clk_div = 0;
u32 speed = 0;
u32 cr0 = 0;
@@ -438,6 +440,9 @@ static void pump_transfers(unsigned long data)
chip = dws->cur_chip;
spi = message->spi;
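+	/* derive the clock divider from the device's speed if it has not
+	 * been computed yet */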
+ if (unlikely(!chip->clk_div))
+ chip->clk_div = dws->max_freq / chip->speed_hz;
+
if (message->state == ERROR_STATE) {
message->status = -EIO;
goto early_exit;
@@ -492,7 +497,7 @@ static void pump_transfers(unsigned long data)
/* clk_div doesn't support odd number */
clk_div = dws->max_freq / speed;
- clk_div = (clk_div >> 1) << 1;
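+	/* round up to the next even divider so the clock never exceeds
+	 * the requested speed (e.g. a raw value of 3 becomes 4, not 2) */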
+ clk_div = (clk_div + 1) & 0xfffe;
chip->speed_hz = speed;
chip->clk_div = clk_div;
@@ -532,14 +537,35 @@ static void pump_transfers(unsigned long data)
}
message->state = RUNNING_STATE;
+ /*
+	 * Adjust the transfer mode if necessary; this relies on the
+	 * platform-dependent chipselect mechanism (dws->cs_control).
+ */
+ if (dws->cs_control) {
+ if (dws->rx && dws->tx)
+ chip->tmode = 0x00;
+ else if (dws->rx)
+ chip->tmode = 0x02;
+ else
+ chip->tmode = 0x01;
+
+ cr0 &= ~(0x3 << SPI_MODE_OFFSET);
+ cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
+ }
+
/* Check if current transfer is a DMA transaction */
dws->dma_mapped = map_dma_buffers(dws);
+ /*
+	 * Interrupt mode:
+	 * we only need to set the TXEI IRQ, as TX and RX always happen
+	 * synchronously
+ */
if (!dws->dma_mapped && !chip->poll_mode) {
- if (dws->rx)
- imask |= SPI_INT_RXFI;
- if (dws->tx)
- imask |= SPI_INT_TXEI;
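+		/* TX FIFO threshold: half the FIFO depth, capped at the
+		 * length of this transfer */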
+ int templen = dws->len / dws->n_bytes;
+ txint_level = dws->fifo_len / 2;
+ txint_level = (templen > txint_level) ? txint_level : templen;
+
+ imask |= SPI_INT_TXEI;
dws->transfer_handler = interrupt_transfer;
}
@@ -549,21 +575,23 @@ static void pump_transfers(unsigned long data)
* 2. clk_div is changed
* 3. control value changes
*/
- if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div) {
+ if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div || imask) {
spi_enable_chip(dws, 0);
if (dw_readw(dws, ctrl0) != cr0)
dw_writew(dws, ctrl0, cr0);
+ spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
+ spi_chip_sel(dws, spi->chip_select);
+
	/* Set the interrupt mask; for poll mode just disable all interrupts */
spi_mask_intr(dws, 0xff);
- if (!chip->poll_mode)
+ if (imask)
spi_umask_intr(dws, imask);
+ if (txint_level)
+ dw_writew(dws, txfltr, txint_level);
- spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
- spi_chip_sel(dws, spi->chip_select);
spi_enable_chip(dws, 1);
-
if (cs_change)
dws->prev_chip = chip;
}
@@ -712,11 +740,11 @@ static int dw_spi_setup(struct spi_device *spi)
}
chip->bits_per_word = spi->bits_per_word;
+ if (!spi->max_speed_hz) {
+ dev_err(&spi->dev, "No max speed HZ parameter\n");
+ return -EINVAL;
+ }
chip->speed_hz = spi->max_speed_hz;
- if (chip->speed_hz)
- chip->clk_div = 25000000 / chip->speed_hz;
- else
- chip->clk_div = 8; /* default value */
chip->tmode = 0; /* Tx & Rx */
/* Default SPI mode is SCPOL = 0, SCPH = 0 */
@@ -735,7 +763,7 @@ static void dw_spi_cleanup(struct spi_device *spi)
kfree(chip);
}
-static int __init init_queue(struct dw_spi *dws)
+static int __devinit init_queue(struct dw_spi *dws)
{
INIT_LIST_HEAD(&dws->queue);
spin_lock_init(&dws->lock);
@@ -817,6 +845,22 @@ static void spi_hw_init(struct dw_spi *dws)
spi_mask_intr(dws, 0xff);
spi_enable_chip(dws, 1);
flush(dws);
+
+ /*
+	 * Try to detect the FIFO depth if it was not set by the interface
+	 * driver; per the HW spec the depth can be anywhere from 2 to 256.
+ */
+ if (!dws->fifo_len) {
+ u32 fifo;
+ for (fifo = 2; fifo <= 257; fifo++) {
+ dw_writew(dws, txfltr, fifo);
+ if (fifo != dw_readw(dws, txfltr))
+ break;
+ }
+
+ dws->fifo_len = (fifo == 257) ? 0 : fifo;
+ dw_writew(dws, txfltr, 0);
+ }
}
int __devinit dw_spi_add_host(struct dw_spi *dws)
@@ -913,6 +957,7 @@ void __devexit dw_spi_remove_host(struct dw_spi *dws)
/* Disconnect from the SPI framework */
spi_unregister_master(dws->master);
}
+EXPORT_SYMBOL(dw_spi_remove_host);
int dw_spi_suspend_host(struct dw_spi *dws)
{
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c
new file mode 100644
index 000000000000..e35b45ac5174
--- /dev/null
+++ b/drivers/spi/dw_spi_mmio.c
@@ -0,0 +1,147 @@
+/*
+ * dw_spi_mmio.c - Memory-mapped interface driver for DW SPI Core
+ *
+ * Copyright (c) 2010, Octasic semiconductor.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/spi/dw_spi.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "dw_spi_mmio"
+
+struct dw_spi_mmio {
+ struct dw_spi dws;
+ struct clk *clk;
+};
+
+static int __devinit dw_spi_mmio_probe(struct platform_device *pdev)
+{
+ struct dw_spi_mmio *dwsmmio;
+ struct dw_spi *dws;
+ struct resource *mem, *ioarea;
+ int ret;
+
+ dwsmmio = kzalloc(sizeof(struct dw_spi_mmio), GFP_KERNEL);
+ if (!dwsmmio) {
+ ret = -ENOMEM;
+ goto err_end;
+ }
+
+ dws = &dwsmmio->dws;
+
+ /* Get basic io resource and map it */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "no mem resource?\n");
+ ret = -EINVAL;
+ goto err_kfree;
+ }
+
+ ioarea = request_mem_region(mem->start, resource_size(mem),
+ pdev->name);
+ if (!ioarea) {
+ dev_err(&pdev->dev, "SPI region already claimed\n");
+ ret = -EBUSY;
+ goto err_kfree;
+ }
+
+ dws->regs = ioremap_nocache(mem->start, resource_size(mem));
+ if (!dws->regs) {
+		dev_err(&pdev->dev, "failed to remap SPI registers\n");
+ ret = -ENOMEM;
+ goto err_release_reg;
+ }
+
+ dws->irq = platform_get_irq(pdev, 0);
+ if (dws->irq < 0) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ ret = dws->irq; /* -ENXIO */
+ goto err_unmap;
+ }
+
+ dwsmmio->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(dwsmmio->clk)) {
+		ret = PTR_ERR(dwsmmio->clk);
+ goto err_irq;
+ }
+ clk_enable(dwsmmio->clk);
+
+ dws->parent_dev = &pdev->dev;
+ dws->bus_num = 0;
+ dws->num_cs = 4;
+ dws->max_freq = clk_get_rate(dwsmmio->clk);
+
+ ret = dw_spi_add_host(dws);
+ if (ret)
+ goto err_clk;
+
+ platform_set_drvdata(pdev, dwsmmio);
+ return 0;
+
+err_clk:
+ clk_disable(dwsmmio->clk);
+ clk_put(dwsmmio->clk);
+ dwsmmio->clk = NULL;
+err_irq:
+ free_irq(dws->irq, dws);
+err_unmap:
+ iounmap(dws->regs);
+err_release_reg:
+ release_mem_region(mem->start, resource_size(mem));
+err_kfree:
+ kfree(dwsmmio);
+err_end:
+ return ret;
+}
+
+static int __devexit dw_spi_mmio_remove(struct platform_device *pdev)
+{
+ struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
+ struct resource *mem;
+
+ platform_set_drvdata(pdev, NULL);
+
+ clk_disable(dwsmmio->clk);
+ clk_put(dwsmmio->clk);
+ dwsmmio->clk = NULL;
+
+ free_irq(dwsmmio->dws.irq, &dwsmmio->dws);
+ dw_spi_remove_host(&dwsmmio->dws);
+ iounmap(dwsmmio->dws.regs);
+ kfree(dwsmmio);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+ return 0;
+}
+
+static struct platform_driver dw_spi_mmio_driver = {
+ .remove = __devexit_p(dw_spi_mmio_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init dw_spi_mmio_init(void)
+{
+ return platform_driver_probe(&dw_spi_mmio_driver, dw_spi_mmio_probe);
+}
+module_init(dw_spi_mmio_init);
+
+static void __exit dw_spi_mmio_exit(void)
+{
+ platform_driver_unregister(&dw_spi_mmio_driver);
+}
+module_exit(dw_spi_mmio_exit);
+
+MODULE_AUTHOR("Jean-Hugues Deschenes <jean-hugues.deschenes@octasic.com>");
+MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW SPI Core");
+MODULE_LICENSE("GPL v2");
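
For context on how this new interface driver is meant to be used: it expects the platform to describe the controller with one memory resource, one IRQ, and a clock whose rate becomes the bus's maximum frequency. The board-support sketch below is illustrative only; the base address, IRQ number, device id and identifiers are invented, and it assumes a suitable clock has already been registered for the device elsewhere.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

#define BOARD_DW_SPI_BASE	0x10010000	/* hypothetical base address */
#define BOARD_DW_SPI_IRQ	42		/* hypothetical interrupt line */

static struct resource board_dw_spi_resources[] = {
	{
		.start	= BOARD_DW_SPI_BASE,
		.end	= BOARD_DW_SPI_BASE + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= BOARD_DW_SPI_IRQ,
		.end	= BOARD_DW_SPI_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_dw_spi_device = {
	.name		= "dw_spi_mmio",	/* matches DRIVER_NAME above */
	.id		= 0,
	.resource	= board_dw_spi_resources,
	.num_resources	= ARRAY_SIZE(board_dw_spi_resources),
};

static int __init board_dw_spi_register(void)
{
	/* the probe above takes max_freq from clk_get(&pdev->dev, NULL),
	 * so a matching clock must exist for this device */
	return platform_device_register(&board_dw_spi_device);
}
arch_initcall(board_dw_spi_register);
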
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c
index 34ba69161734..1f0735f9cc76 100644
--- a/drivers/spi/dw_spi_pci.c
+++ b/drivers/spi/dw_spi_pci.c
@@ -73,6 +73,7 @@ static int __devinit spi_pci_probe(struct pci_dev *pdev,
dws->num_cs = 4;
	dws->max_freq = 25000000;	/* for Moorestown */
dws->irq = pdev->irq;
+ dws->fifo_len = 40; /* FIFO has 40 words buffer */
ret = dw_spi_add_host(dws);
if (ret)
@@ -98,6 +99,7 @@ static void __devexit spi_pci_remove(struct pci_dev *pdev)
struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
pci_set_drvdata(pdev, NULL);
+ dw_spi_remove_host(&dwpci->dws);
iounmap(dwpci->dws.regs);
pci_release_region(pdev, 0);
kfree(dwpci);
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index f50c81df336a..04747868d6c4 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -503,7 +503,7 @@ static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op)
return mpc52xx_psc_spi_do_remove(&op->dev);
}
-static struct of_device_id mpc52xx_psc_spi_of_match[] = {
+static const struct of_device_id mpc52xx_psc_spi_of_match[] = {
{ .compatible = "fsl,mpc5200-psc-spi", },
{ .compatible = "mpc5200-psc-spi", }, /* old */
{}
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c
index 45bfe6458173..6eab46537a0a 100644
--- a/drivers/spi/mpc52xx_spi.c
+++ b/drivers/spi/mpc52xx_spi.c
@@ -550,7 +550,7 @@ static int __devexit mpc52xx_spi_remove(struct of_device *op)
return 0;
}
-static struct of_device_id mpc52xx_spi_match[] __devinitdata = {
+static const struct of_device_id mpc52xx_spi_match[] __devinitconst = {
{ .compatible = "fsl,mpc5200-spi", },
{}
};
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 1893f1e96dc4..0ddbbe45e834 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -469,7 +469,7 @@ static int spi_imx_setup(struct spi_device *spi)
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
int gpio = spi_imx->chipselect[spi->chip_select];
- pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__,
+ dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
spi->mode, spi->bits_per_word, spi->max_speed_hz);
if (gpio >= 0)
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index 1fb2a6ea328c..4f0cc9d457e0 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -365,7 +365,7 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
if ((mpc8xxx_spi->spibrg / hz) > 64) {
cs->hw_mode |= SPMODE_DIV16;
- pm = mpc8xxx_spi->spibrg / (hz * 64);
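+		/* divide rounding up, so the chosen prescaler never yields
+		 * a clock faster than the requested rate */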
+ pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;
WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. "
"Will use %d Hz instead.\n", dev_name(&spi->dev),
@@ -373,7 +373,7 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
if (pm > 16)
pm = 16;
} else
- pm = mpc8xxx_spi->spibrg / (hz * 4);
+ pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1;
if (pm)
pm--;
@@ -1328,7 +1328,7 @@ static struct of_platform_driver of_mpc8xxx_spi_driver = {
static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
{
struct resource *mem;
- unsigned int irq;
+ int irq;
struct spi_master *master;
if (!pdev->dev.platform_data)
@@ -1339,7 +1339,7 @@ static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
return -EINVAL;
irq = platform_get_irq(pdev, 0);
- if (!irq)
+ if (irq <= 0)
return -EINVAL;
master = mpc8xxx_spi_probe(&pdev->dev, mem, irq);
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c
index 140a18d6cf3e..6d8d4026a07a 100644
--- a/drivers/spi/spi_ppc4xx.c
+++ b/drivers/spi/spi_ppc4xx.c
@@ -578,7 +578,7 @@ static int __exit spi_ppc4xx_of_remove(struct of_device *op)
return 0;
}
-static struct of_device_id spi_ppc4xx_of_match[] = {
+static const struct of_device_id spi_ppc4xx_of_match[] = {
{ .compatible = "ibm,ppc4xx-spi", },
{},
};
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
index 88a456dba967..97365815a729 100644
--- a/drivers/spi/spi_s3c64xx.c
+++ b/drivers/spi/spi_s3c64xx.c
@@ -28,7 +28,7 @@
#include <linux/spi/spi.h>
#include <mach/dma.h>
-#include <plat/spi.h>
+#include <plat/s3c64xx-spi.h>
/* Registers and bit-fields */
@@ -137,6 +137,7 @@
/**
* struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
* @clk: Pointer to the spi clock.
+ * @src_clk: Pointer to the clock used to generate SPI signals.
* @master: Pointer to the SPI Protocol master.
* @workqueue: Work queue for the SPI xfer requests.
* @cntrlr_info: Platform specific data for the controller this driver manages.
@@ -157,10 +158,11 @@
struct s3c64xx_spi_driver_data {
void __iomem *regs;
struct clk *clk;
+ struct clk *src_clk;
struct platform_device *pdev;
struct spi_master *master;
struct workqueue_struct *workqueue;
- struct s3c64xx_spi_cntrlr_info *cntrlr_info;
+ struct s3c64xx_spi_info *cntrlr_info;
struct spi_device *tgl_spi;
struct work_struct work;
struct list_head queue;
@@ -180,7 +182,7 @@ static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
{
- struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
unsigned long loops;
u32 val;
@@ -225,7 +227,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi,
struct spi_transfer *xfer, int dma_mode)
{
- struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
u32 modecfg, chcfg;
@@ -298,19 +300,20 @@ static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
/* Deselect the last toggled device */
cs = sdd->tgl_spi->controller_data;
- cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1);
+ cs->set_level(cs->line,
+ spi->mode & SPI_CS_HIGH ? 0 : 1);
}
sdd->tgl_spi = NULL;
}
cs = spi->controller_data;
- cs->set_level(spi->mode & SPI_CS_HIGH ? 1 : 0);
+ cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
}
static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
struct spi_transfer *xfer, int dma_mode)
{
- struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
unsigned long val;
int ms;
@@ -384,12 +387,11 @@ static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
if (sdd->tgl_spi == spi)
sdd->tgl_spi = NULL;
- cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1);
+ cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
}
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
- struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
u32 val;
@@ -435,7 +437,7 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
/* Configure Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val &= ~S3C64XX_SPI_PSR_MASK;
- val |= ((clk_get_rate(sci->src_clk) / sdd->cur_speed / 2 - 1)
+ val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
& S3C64XX_SPI_PSR_MASK);
writel(val, regs + S3C64XX_SPI_CLK_CFG);
@@ -558,7 +560,7 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
struct spi_message *msg)
{
- struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
struct spi_device *spi = msg->spi;
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
struct spi_transfer *xfer;
@@ -632,8 +634,8 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
S3C64XX_SPI_DEACT(sdd);
if (status) {
- dev_err(&spi->dev, "I/O Error: \
- rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
+ dev_err(&spi->dev, "I/O Error: "
+ "rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
(sdd->state & RXBUSY) ? 'f' : 'p',
(sdd->state & TXBUSY) ? 'f' : 'p',
@@ -786,7 +788,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
struct s3c64xx_spi_driver_data *sdd;
- struct s3c64xx_spi_cntrlr_info *sci;
+ struct s3c64xx_spi_info *sci;
struct spi_message *msg;
u32 psr, speed;
unsigned long flags;
@@ -831,17 +833,17 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
}
/* Check if we can provide the requested rate */
- speed = clk_get_rate(sci->src_clk) / 2 / (0 + 1); /* Max possible */
+ speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); /* Max possible */
if (spi->max_speed_hz > speed)
spi->max_speed_hz = speed;
- psr = clk_get_rate(sci->src_clk) / 2 / spi->max_speed_hz - 1;
+ psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
psr &= S3C64XX_SPI_PSR_MASK;
if (psr == S3C64XX_SPI_PSR_MASK)
psr--;
- speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1);
+ speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
if (spi->max_speed_hz < speed) {
if (psr+1 < S3C64XX_SPI_PSR_MASK) {
psr++;
@@ -851,7 +853,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
}
}
- speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1);
+ speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
if (spi->max_speed_hz >= speed)
spi->max_speed_hz = speed;
else
@@ -867,7 +869,7 @@ setup_exit:
static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
{
- struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
unsigned int val;
@@ -902,7 +904,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
{
struct resource *mem_res, *dmatx_res, *dmarx_res;
struct s3c64xx_spi_driver_data *sdd;
- struct s3c64xx_spi_cntrlr_info *sci;
+ struct s3c64xx_spi_info *sci;
struct spi_master *master;
int ret;
@@ -1000,18 +1002,15 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
goto err4;
}
- if (sci->src_clk_nr == S3C64XX_SPI_SRCCLK_PCLK)
- sci->src_clk = sdd->clk;
- else
- sci->src_clk = clk_get(&pdev->dev, sci->src_clk_name);
- if (IS_ERR(sci->src_clk)) {
+ sdd->src_clk = clk_get(&pdev->dev, sci->src_clk_name);
+ if (IS_ERR(sdd->src_clk)) {
dev_err(&pdev->dev,
"Unable to acquire clock '%s'\n", sci->src_clk_name);
- ret = PTR_ERR(sci->src_clk);
+ ret = PTR_ERR(sdd->src_clk);
goto err5;
}
- if (sci->src_clk != sdd->clk && clk_enable(sci->src_clk)) {
+ if (clk_enable(sdd->src_clk)) {
dev_err(&pdev->dev, "Couldn't enable clock '%s'\n",
sci->src_clk_name);
ret = -EBUSY;
@@ -1040,11 +1039,10 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
goto err8;
}
- dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d \
- with %d Slaves attached\n",
+ dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d "
+ "with %d Slaves attached\n",
pdev->id, master->num_chipselect);
- dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\
- \tDMA=[Rx-%d, Tx-%d]\n",
+ dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
		mem_res->start, mem_res->end,
sdd->rx_dmach, sdd->tx_dmach);
@@ -1053,11 +1051,9 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
err8:
destroy_workqueue(sdd->workqueue);
err7:
- if (sci->src_clk != sdd->clk)
- clk_disable(sci->src_clk);
+ clk_disable(sdd->src_clk);
err6:
- if (sci->src_clk != sdd->clk)
- clk_put(sci->src_clk);
+ clk_put(sdd->src_clk);
err5:
clk_disable(sdd->clk);
err4:
@@ -1078,7 +1074,6 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
struct resource *mem_res;
unsigned long flags;
@@ -1093,11 +1088,8 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
destroy_workqueue(sdd->workqueue);
- if (sci->src_clk != sdd->clk)
- clk_disable(sci->src_clk);
-
- if (sci->src_clk != sdd->clk)
- clk_put(sci->src_clk);
+ clk_disable(sdd->src_clk);
+ clk_put(sdd->src_clk);
clk_disable(sdd->clk);
clk_put(sdd->clk);
@@ -1105,7 +1097,8 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
iounmap((void *) sdd->regs);
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(mem_res->start, resource_size(mem_res));
+ if (mem_res != NULL)
+ release_mem_region(mem_res->start, resource_size(mem_res));
platform_set_drvdata(pdev, NULL);
spi_master_put(master);
@@ -1118,8 +1111,6 @@ static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
{
struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
- struct s3c64xx_spi_csinfo *cs;
unsigned long flags;
spin_lock_irqsave(&sdd->lock, flags);
@@ -1130,9 +1121,7 @@ static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
msleep(10);
/* Disable the clock */
- if (sci->src_clk != sdd->clk)
- clk_disable(sci->src_clk);
-
+ clk_disable(sdd->src_clk);
clk_disable(sdd->clk);
sdd->cur_speed = 0; /* Output Clock is stopped */
@@ -1144,15 +1133,13 @@ static int s3c64xx_spi_resume(struct platform_device *pdev)
{
struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
unsigned long flags;
sci->cfg_gpio(pdev);
/* Enable the clock */
- if (sci->src_clk != sdd->clk)
- clk_enable(sci->src_clk);
-
+ clk_enable(sdd->src_clk);
clk_enable(sdd->clk);
s3c64xx_spi_hwinit(sdd, pdev->id);
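
The enable_cs()/disable_cs() hunks above change the board-supplied set_level() callback to take the chipselect line as an explicit first argument. The snippet below is only an illustration of that contract; the GPIO number and identifiers are invented, and the exact definition of struct s3c64xx_spi_csinfo and the set_level() prototype should be taken from plat/s3c64xx-spi.h rather than from this sketch.

#include <linux/gpio.h>
#include <plat/s3c64xx-spi.h>

#define BOARD_SPI_CS_GPIO	100	/* hypothetical chipselect GPIO */

/* invoked by the driver as cs->set_level(cs->line, lvl) */
static void board_spi_cs_set_level(unsigned line, int lvl)
{
	gpio_set_value(line, lvl);
}

static struct s3c64xx_spi_csinfo board_spi_csinfo = {
	.line		= BOARD_SPI_CS_GPIO,
	.set_level	= board_spi_cs_set_level,
};

/* &board_spi_csinfo would then be passed as the controller_data of the
 * spi_board_info entry describing the slave on this chipselect */
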
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c
index 30973ec16a93..d93b66743ba7 100644
--- a/drivers/spi/spi_sh_msiof.c
+++ b/drivers/spi/spi_sh_msiof.c
@@ -20,12 +20,12 @@
#include <linux/bitmap.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/err.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/spi/sh_msiof.h>
-#include <asm/spi.h>
#include <asm/unaligned.h>
struct sh_msiof_spi_priv {
diff --git a/drivers/spi/spi_stmp.c b/drivers/spi/spi_stmp.c
index 2552bb364005..fadff76eb7e0 100644
--- a/drivers/spi/spi_stmp.c
+++ b/drivers/spi/spi_stmp.c
@@ -76,7 +76,7 @@ struct stmp_spi {
break; \
} \
cpu_relax(); \
- } while (time_before(end_jiffies, jiffies)); \
+ } while (time_before(jiffies, end_jiffies)); \
succeeded; \
})
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 9f386379c169..1b47363cb73f 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -93,6 +93,26 @@ struct xilinx_spi {
void (*rx_fn) (struct xilinx_spi *);
};
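+/*
+ * Simple wrappers: depending on the architecture, ioread32()/iowrite32()
+ * and their big-endian variants may be macros or have prototypes that do
+ * not match the driver's read_fn/write_fn pointers, so they cannot be
+ * assigned to those pointers directly.
+ */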
+static void xspi_write32(u32 val, void __iomem *addr)
+{
+ iowrite32(val, addr);
+}
+
+static unsigned int xspi_read32(void __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static void xspi_write32_be(u32 val, void __iomem *addr)
+{
+ iowrite32be(val, addr);
+}
+
+static unsigned int xspi_read32_be(void __iomem *addr)
+{
+ return ioread32be(addr);
+}
+
static void xspi_tx8(struct xilinx_spi *xspi)
{
xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET);
@@ -374,11 +394,11 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
xspi->mem = *mem;
xspi->irq = irq;
if (pdata->little_endian) {
- xspi->read_fn = ioread32;
- xspi->write_fn = iowrite32;
+ xspi->read_fn = xspi_read32;
+ xspi->write_fn = xspi_write32;
} else {
- xspi->read_fn = ioread32be;
- xspi->write_fn = iowrite32be;
+ xspi->read_fn = xspi_read32_be;
+ xspi->write_fn = xspi_write32_be;
}
xspi->bits_per_word = pdata->bits_per_word;
if (xspi->bits_per_word == 8) {
diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c
index 71dc3adc0495..ed34a8d419c7 100644
--- a/drivers/spi/xilinx_spi_of.c
+++ b/drivers/spi/xilinx_spi_of.c
@@ -99,7 +99,7 @@ static int __exit xilinx_spi_of_remove(struct of_device *op)
return xilinx_spi_remove(op);
}
-static struct of_device_id xilinx_spi_of_match[] = {
+static const struct of_device_id xilinx_spi_of_match[] = {
{ .compatible = "xlnx,xps-spi-2.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.b", },
{}
diff --git a/include/linux/spi/dw_spi.h b/include/linux/spi/dw_spi.h
index 51b3e771a9a3..cc813f95a2f2 100644
--- a/include/linux/spi/dw_spi.h
+++ b/include/linux/spi/dw_spi.h
@@ -90,6 +90,7 @@ struct dw_spi {
unsigned long paddr;
u32 iolen;
int irq;
+ u32 fifo_len; /* depth of the FIFO buffer */
u32 max_freq; /* max bus freq supported */
u16 bus_num;
@@ -171,6 +172,10 @@ static inline void spi_chip_sel(struct dw_spi *dws, u16 cs)
{
if (cs > dws->num_cs)
return;
+
+ if (dws->cs_control)
+ dws->cs_control(1);
+
dw_writel(dws, ser, 1 << cs);
}
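
With the hunk above, spi_chip_sel() lets an interface driver drive an external chipselect through dws->cs_control before the SER register is written. A rough sketch of wiring that up follows; the GPIO number, helper names and active-low polarity are assumptions, and the callback is written to take a single u32 because that is how the dws->cs_control(1) call above invokes it.

#include <linux/gpio.h>
#include <linux/spi/dw_spi.h>

#define BOARD_DW_SPI_CS_GPIO	57	/* hypothetical external CS line */

static void board_dw_spi_cs_control(u32 command)
{
	/* assume an active-low slave select wired to a GPIO;
	 * command == 1 means "select" (see spi_chip_sel() above) */
	gpio_set_value(BOARD_DW_SPI_CS_GPIO, command ? 0 : 1);
}

/* an interface driver would hook this up before dw_spi_add_host() */
static void board_dw_spi_attach_cs(struct dw_spi *dws)
{
	if (gpio_request(BOARD_DW_SPI_CS_GPIO, "dw_spi_cs"))
		return;
	gpio_direction_output(BOARD_DW_SPI_CS_GPIO, 1);	/* deselected */
	dws->cs_control = board_dw_spi_cs_control;
}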