Diffstat (limited to 'drivers/tty/serial/amba-pl011.c')
-rw-r--r--  drivers/tty/serial/amba-pl011.c | 1119
1 file changed, 700 insertions, 419 deletions
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 89ade213a1a9..7f17d288c807 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -16,15 +16,11 @@
* and hooked into this driver.
*/
-
-#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
+#include <linux/platform_device.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
@@ -41,14 +37,11 @@
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
#include <linux/acpi.h>
-#include "amba-pl011.h"
-
#define UART_NR 14
#define SERIAL_AMBA_MAJOR 204
@@ -57,8 +50,38 @@
#define AMBA_ISR_PASS_LIMIT 256
-#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
-#define UART_DUMMY_DR_RX (1 << 16)
+#define UART_DR_ERROR (UART011_DR_OE | UART011_DR_BE | UART011_DR_PE | UART011_DR_FE)
+#define UART_DUMMY_DR_RX BIT(16)
+
+enum {
+ REG_DR,
+ REG_ST_DMAWM,
+ REG_ST_TIMEOUT,
+ REG_FR,
+ REG_LCRH_RX,
+ REG_LCRH_TX,
+ REG_IBRD,
+ REG_FBRD,
+ REG_CR,
+ REG_IFLS,
+ REG_IMSC,
+ REG_RIS,
+ REG_MIS,
+ REG_ICR,
+ REG_DMACR,
+ REG_ST_XFCR,
+ REG_ST_XON1,
+ REG_ST_XON2,
+ REG_ST_XOFF1,
+ REG_ST_XOFF2,
+ REG_ST_ITCR,
+ REG_ST_ITIP,
+ REG_ST_ABCR,
+ REG_ST_ABIMSC,
+
+ /* The size of the array - must be last */
+ REG_ARRAY_SIZE,
+};
static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
[REG_DR] = UART01x_DR,
@@ -102,7 +125,7 @@ static unsigned int get_fifosize_arm(struct amba_device *dev)
static struct vendor_data vendor_arm = {
.reg_offset = pl011_std_offsets,
- .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
+ .ifls = UART011_IFLS_RX4_8 | UART011_IFLS_TX4_8,
.fr_busy = UART01x_FR_BUSY,
.fr_dsr = UART01x_FR_DSR,
.fr_cts = UART01x_FR_CTS,
@@ -180,7 +203,7 @@ static unsigned int get_fifosize_st(struct amba_device *dev)
static struct vendor_data vendor_st = {
.reg_offset = pl011_st_offsets,
- .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
+ .ifls = UART011_IFLS_RX_HALF | UART011_IFLS_TX_HALF,
.fr_busy = UART01x_FR_BUSY,
.fr_dsr = UART01x_FR_DSR,
.fr_cts = UART01x_FR_CTS,
@@ -193,51 +216,20 @@ static struct vendor_data vendor_st = {
.get_fifosize = get_fifosize_st,
};
-static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
- [REG_DR] = ZX_UART011_DR,
- [REG_FR] = ZX_UART011_FR,
- [REG_LCRH_RX] = ZX_UART011_LCRH,
- [REG_LCRH_TX] = ZX_UART011_LCRH,
- [REG_IBRD] = ZX_UART011_IBRD,
- [REG_FBRD] = ZX_UART011_FBRD,
- [REG_CR] = ZX_UART011_CR,
- [REG_IFLS] = ZX_UART011_IFLS,
- [REG_IMSC] = ZX_UART011_IMSC,
- [REG_RIS] = ZX_UART011_RIS,
- [REG_MIS] = ZX_UART011_MIS,
- [REG_ICR] = ZX_UART011_ICR,
- [REG_DMACR] = ZX_UART011_DMACR,
-};
-
-static unsigned int get_fifosize_zte(struct amba_device *dev)
-{
- return 16;
-}
-
-static struct vendor_data vendor_zte = {
- .reg_offset = pl011_zte_offsets,
- .access_32b = true,
- .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
- .fr_busy = ZX_UART01x_FR_BUSY,
- .fr_dsr = ZX_UART01x_FR_DSR,
- .fr_cts = ZX_UART01x_FR_CTS,
- .fr_ri = ZX_UART011_FR_RI,
- .get_fifosize = get_fifosize_zte,
-};
-
/* Deals with DMA transactions */
-struct pl011_sgbuf {
- struct scatterlist sg;
- char *buf;
+struct pl011_dmabuf {
+ dma_addr_t dma;
+ size_t len;
+ char *buf;
};
struct pl011_dmarx_data {
struct dma_chan *chan;
struct completion complete;
bool use_buf_b;
- struct pl011_sgbuf sgbuf_a;
- struct pl011_sgbuf sgbuf_b;
+ struct pl011_dmabuf dbuf_a;
+ struct pl011_dmabuf dbuf_b;
dma_cookie_t cookie;
bool running;
struct timer_list timer;
@@ -250,11 +242,19 @@ struct pl011_dmarx_data {
struct pl011_dmatx_data {
struct dma_chan *chan;
- struct scatterlist sg;
+ dma_addr_t dma;
+ size_t len;
char *buf;
bool queued;
};
+enum pl011_rs485_tx_state {
+ OFF,
+ WAIT_AFTER_RTS,
+ SEND,
+ WAIT_AFTER_SEND,
+};
+
/*
* We wrap our port structure around the generic uart_port.
*/
@@ -263,15 +263,19 @@ struct uart_amba_port {
const u16 *reg_offset;
struct clk *clk;
const struct vendor_data *vendor;
- unsigned int dmacr; /* dma control reg */
unsigned int im; /* interrupt mask */
unsigned int old_status;
unsigned int fifosize; /* vendor-specific */
- unsigned int old_cr; /* state during shutdown */
unsigned int fixed_baud; /* vendor-set fixed baud rate */
char type[12];
+ ktime_t rs485_tx_drain_interval; /* nano */
+ enum pl011_rs485_tx_state rs485_tx_state;
+ struct hrtimer trigger_start_tx;
+ struct hrtimer trigger_stop_tx;
+ bool console_line_ended;
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
+ unsigned int dmacr; /* dma control reg */
bool using_tx_dma;
bool using_rx_dma;
struct pl011_dmarx_data dmarx;
@@ -280,14 +284,16 @@ struct uart_amba_port {
#endif
};
+static unsigned int pl011_tx_empty(struct uart_port *port);
+
static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
- unsigned int reg)
+ unsigned int reg)
{
return uap->reg_offset[reg];
}
static unsigned int pl011_read(const struct uart_amba_port *uap,
- unsigned int reg)
+ unsigned int reg)
{
void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);
@@ -296,7 +302,7 @@ static unsigned int pl011_read(const struct uart_amba_port *uap,
}
static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
- unsigned int reg)
+ unsigned int reg)
{
void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);
@@ -313,8 +319,10 @@ static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
*/
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
+ unsigned int ch, fifotaken;
+ int sysrq;
u16 status;
- unsigned int ch, flag, fifotaken;
+ u8 flag;
for (fifotaken = 0; fifotaken != 256; fifotaken++) {
status = pl011_read(uap, REG_FR);
@@ -332,10 +340,11 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
uap->port.icount.brk++;
if (uart_handle_break(&uap->port))
continue;
- } else if (ch & UART011_DR_PE)
+ } else if (ch & UART011_DR_PE) {
uap->port.icount.parity++;
- else if (ch & UART011_DR_FE)
+ } else if (ch & UART011_DR_FE) {
uap->port.icount.frame++;
+ }
if (ch & UART011_DR_OE)
uap->port.icount.overrun++;
@@ -349,16 +358,14 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
flag = TTY_FRAME;
}
- if (uart_handle_sysrq_char(&uap->port, ch & 255))
- continue;
-
- uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+ sysrq = uart_prepare_sysrq_char(&uap->port, ch & 255);
+ if (!sysrq)
+ uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
}
return fifotaken;
}
-
/*
* All the DMA operation mode stuff goes inside this ifdef.
* This assumes that you have a generic DMA device interface,
@@ -368,32 +375,24 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
-static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
- enum dma_data_direction dir)
+static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
+ enum dma_data_direction dir)
{
- dma_addr_t dma_addr;
-
- sg->buf = dma_alloc_coherent(chan->device->dev,
- PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
- if (!sg->buf)
+ db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
+ &db->dma, GFP_KERNEL);
+ if (!db->buf)
return -ENOMEM;
-
- sg_init_table(&sg->sg, 1);
- sg_set_page(&sg->sg, phys_to_page(dma_addr),
- PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
- sg_dma_address(&sg->sg) = dma_addr;
- sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
+ db->len = PL011_DMA_BUFFER_SIZE;
return 0;
}
-static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
- enum dma_data_direction dir)
+static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
+ enum dma_data_direction dir)
{
- if (sg->buf) {
+ if (db->buf) {
dma_free_coherent(chan->device->dev,
- PL011_DMA_BUFFER_SIZE, sg->buf,
- sg_dma_address(&sg->sg));
+ PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
}
}
@@ -414,7 +413,7 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
dma_cap_mask_t mask;
uap->dma_probed = true;
- chan = dma_request_slave_channel_reason(dev, "tx");
+ chan = dma_request_chan(dev, "tx");
if (IS_ERR(chan)) {
if (PTR_ERR(chan) == -EPROBE_DEFER) {
uap->dma_probed = false;
@@ -423,7 +422,7 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
/* We need platform data */
if (!plat || !plat->dma_filter) {
- dev_info(uap->port.dev, "no DMA platform data\n");
+ dev_dbg(uap->port.dev, "no DMA platform data\n");
return;
}
@@ -432,7 +431,7 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
dma_cap_set(DMA_SLAVE, mask);
chan = dma_request_channel(mask, plat->dma_filter,
- plat->dma_tx_param);
+ plat->dma_tx_param);
if (!chan) {
dev_err(uap->port.dev, "no TX DMA channel!\n");
return;
@@ -446,9 +445,9 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
dma_chan_name(uap->dmatx.chan));
/* Optionally make use of an RX channel as well */
- chan = dma_request_slave_channel(dev, "rx");
+ chan = dma_request_chan(dev, "rx");
- if (!chan && plat && plat->dma_rx_param) {
+ if (IS_ERR(chan) && plat && plat->dma_rx_param) {
chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
if (!chan) {
@@ -457,7 +456,7 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
}
}
- if (chan) {
+ if (!IS_ERR(chan)) {
struct dma_slave_config rx_conf = {
.src_addr = uap->port.mapbase +
pl011_reg_to_offset(uap, REG_DR),
@@ -473,12 +472,12 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
* If the controller does, check for suitable residue processing
* otherwise assume all is well.
*/
- if (0 == dma_get_slave_caps(chan, &caps)) {
+ if (dma_get_slave_caps(chan, &caps) == 0) {
if (caps.residue_granularity ==
DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
dma_release_channel(chan);
dev_info(uap->port.dev,
- "RX DMA disabled - no residue processing\n");
+ "RX DMA disabled - no residue processing\n");
return;
}
}
@@ -507,18 +506,16 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
else
uap->dmarx.poll_timeout = 3000;
} else if (!plat && dev->of_node) {
- uap->dmarx.auto_poll_rate = of_property_read_bool(
- dev->of_node, "auto-poll");
+ uap->dmarx.auto_poll_rate =
+ of_property_read_bool(dev->of_node, "auto-poll");
if (uap->dmarx.auto_poll_rate) {
u32 x;
- if (0 == of_property_read_u32(dev->of_node,
- "poll-rate-ms", &x))
+ if (of_property_read_u32(dev->of_node, "poll-rate-ms", &x) == 0)
uap->dmarx.poll_rate = x;
else
uap->dmarx.poll_rate = 100;
- if (0 == of_property_read_u32(dev->of_node,
- "poll-timeout-ms", &x))
+ if (of_property_read_u32(dev->of_node, "poll-timeout-ms", &x) == 0)
uap->dmarx.poll_timeout = x;
else
uap->dmarx.poll_timeout = 3000;
@@ -548,14 +545,15 @@ static void pl011_start_tx_pio(struct uart_amba_port *uap);
static void pl011_dma_tx_callback(void *data)
{
struct uart_amba_port *uap = data;
+ struct tty_port *tport = &uap->port.state->port;
struct pl011_dmatx_data *dmatx = &uap->dmatx;
unsigned long flags;
u16 dmacr;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
if (uap->dmatx.queued)
- dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
- DMA_TO_DEVICE);
+ dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
+ dmatx->len, DMA_TO_DEVICE);
dmacr = uap->dmacr;
uap->dmacr = dmacr & ~UART011_TXDMAE;
@@ -571,9 +569,9 @@ static void pl011_dma_tx_callback(void *data)
* get further refills (hence we check dmacr).
*/
if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
- uart_circ_empty(&uap->port.state->xmit)) {
+ kfifo_is_empty(&tport->xmit_fifo)) {
uap->dmatx.queued = false;
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
return;
}
@@ -584,7 +582,7 @@ static void pl011_dma_tx_callback(void *data)
*/
pl011_start_tx_pio(uap);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
}
/*
@@ -601,7 +599,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
struct dma_chan *chan = dmatx->chan;
struct dma_device *dma_dev = chan->device;
struct dma_async_tx_descriptor *desc;
- struct circ_buf *xmit = &uap->port.state->xmit;
+ struct tty_port *tport = &uap->port.state->port;
unsigned int count;
/*
@@ -610,7 +608,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
* the standard interrupt handling. This ensures that we
* issue a uart_write_wakeup() at the appropriate time.
*/
- count = uart_circ_chars_pending(xmit);
+ count = kfifo_len(&tport->xmit_fifo);
if (count < (uap->fifosize >> 1)) {
uap->dmatx.queued = false;
return 0;
@@ -626,33 +624,20 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
if (count > PL011_DMA_BUFFER_SIZE)
count = PL011_DMA_BUFFER_SIZE;
- if (xmit->tail < xmit->head)
- memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
- else {
- size_t first = UART_XMIT_SIZE - xmit->tail;
- size_t second;
-
- if (first > count)
- first = count;
- second = count - first;
-
- memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
- if (second)
- memcpy(&dmatx->buf[first], &xmit->buf[0], second);
- }
-
- dmatx->sg.length = count;
-
- if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
+ count = kfifo_out_peek(&tport->xmit_fifo, dmatx->buf, count);
+ dmatx->len = count;
+ dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev->dev, dmatx->dma)) {
uap->dmatx.queued = false;
dev_dbg(uap->port.dev, "unable to map TX DMA\n");
return -EBUSY;
}
- desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
- dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
+ dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
uap->dmatx.queued = false;
/*
* If DMA cannot be used right now, we complete this
@@ -680,10 +665,9 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
* Now we know that DMA will fire, so advance the ring buffer
* with the stuff we just dispatched.
*/
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
- uap->port.icount.tx += count;
+ uart_xmit_advance(&uap->port, count);
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
uart_write_wakeup(&uap->port);
return 1;
@@ -762,8 +746,9 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
if (pl011_dma_tx_refill(uap) > 0) {
uap->im &= ~UART011_TXIM;
pl011_write(uap->im, uap, REG_IMSC);
- } else
+ } else {
ret = false;
+ }
} else if (!(uap->dmacr & UART011_TXDMAE)) {
uap->dmacr |= UART011_TXDMAE;
pl011_write(uap->dmacr, uap, REG_DMACR);
@@ -813,13 +798,11 @@ __acquires(&uap->port.lock)
if (!uap->using_tx_dma)
return;
- /* Avoid deadlock with the DMA engine callback */
- spin_unlock(&uap->port.lock);
- dmaengine_terminate_all(uap->dmatx.chan);
- spin_lock(&uap->port.lock);
+ dmaengine_terminate_async(uap->dmatx.chan);
+
if (uap->dmatx.queued) {
- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
- DMA_TO_DEVICE);
+ dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
+ uap->dmatx.len, DMA_TO_DEVICE);
uap->dmatx.queued = false;
uap->dmacr &= ~UART011_TXDMAE;
pl011_write(uap->dmacr, uap, REG_DMACR);
@@ -833,17 +816,17 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
struct dma_chan *rxchan = uap->dmarx.chan;
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_async_tx_descriptor *desc;
- struct pl011_sgbuf *sgbuf;
+ struct pl011_dmabuf *dbuf;
if (!rxchan)
return -EIO;
/* Start the RX DMA job */
- sgbuf = uap->dmarx.use_buf_b ?
- &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
- desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ dbuf = uap->dmarx.use_buf_b ?
+ &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
+ desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
/*
* If the DMA engine is busy and cannot prepare a
* channel, no big deal, the driver will fall back
@@ -881,8 +864,8 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
bool readfifo)
{
struct tty_port *port = &uap->port.state->port;
- struct pl011_sgbuf *sgbuf = use_buf_b ?
- &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ struct pl011_dmabuf *dbuf = use_buf_b ?
+ &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
int dma_count = 0;
u32 fifotaken = 0; /* only used for vdbg() */
@@ -891,7 +874,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
if (uap->dmarx.poll_rate) {
/* The data can be taken by polling */
- dmataken = sgbuf->sg.length - dmarx->last_residue;
+ dmataken = dbuf->len - dmarx->last_residue;
/* Recalculate the pending size */
if (pending >= dmataken)
pending -= dmataken;
@@ -899,14 +882,12 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
/* Pick up the remaining data from the DMA */
if (pending) {
-
/*
* First take all chars in the DMA pipe, then look in the FIFO.
* Note that tty_insert_flip_buf() tries to take as many chars
* as it can.
*/
- dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
- pending);
+ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken, pending);
uap->port.icount.rx += dma_count;
if (dma_count < pending)
@@ -916,7 +897,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
/* Reset the last_residue for Rx DMA poll */
if (uap->dmarx.poll_rate)
- dmarx->last_residue = sgbuf->sg.length;
+ dmarx->last_residue = dbuf->len;
/*
* Only continue with trying to read the FIFO if all DMA chars have
@@ -941,20 +922,18 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
fifotaken = pl011_fifo_to_tty(uap);
}
- spin_unlock(&uap->port.lock);
dev_vdbg(uap->port.dev,
"Took %d chars from DMA buffer and %d chars from the FIFO\n",
dma_count, fifotaken);
tty_flip_buffer_push(port);
- spin_lock(&uap->port.lock);
}
static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_chan *rxchan = dmarx->chan;
- struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
- &dmarx->sgbuf_b : &dmarx->sgbuf_a;
+ struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
+ &dmarx->dbuf_b : &dmarx->dbuf_a;
size_t pending;
struct dma_tx_state state;
enum dma_status dmastat;
@@ -976,7 +955,7 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
pl011_write(uap->dmacr, uap, REG_DMACR);
uap->dmarx.running = false;
- pending = sgbuf->sg.length - state.residue;
+ pending = dbuf->len - state.residue;
BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
/* Then we terminate the transfer - we now know our residue */
dmaengine_terminate_all(rxchan);
@@ -990,8 +969,8 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
/* Switch buffer & re-trigger DMA job */
dmarx->use_buf_b = !dmarx->use_buf_b;
if (pl011_dma_rx_trigger_dma(uap)) {
- dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
- "fall back to interrupt mode\n");
+ dev_dbg(uap->port.dev,
+ "could not retrigger RX DMA job fall back to interrupt mode\n");
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
}
@@ -1003,8 +982,8 @@ static void pl011_dma_rx_callback(void *data)
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_chan *rxchan = dmarx->chan;
bool lastbuf = dmarx->use_buf_b;
- struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
- &dmarx->sgbuf_b : &dmarx->sgbuf_a;
+ struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
+ &dmarx->dbuf_b : &dmarx->dbuf_a;
size_t pending;
struct dma_tx_state state;
int ret;
@@ -1016,13 +995,13 @@ static void pl011_dma_rx_callback(void *data)
* routine to flush out the secondary DMA buffer while
* we immediately trigger the next DMA job.
*/
- spin_lock_irq(&uap->port.lock);
+ uart_port_lock_irq(&uap->port);
/*
* Rx data can be taken by the UART interrupts during
* the DMA irq handler. So we check the residue here.
*/
rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
- pending = sgbuf->sg.length - state.residue;
+ pending = dbuf->len - state.residue;
BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
/* Then we terminate the transfer - we now know our residue */
dmaengine_terminate_all(rxchan);
@@ -1032,14 +1011,14 @@ static void pl011_dma_rx_callback(void *data)
ret = pl011_dma_rx_trigger_dma(uap);
pl011_dma_rx_chars(uap, pending, lastbuf, false);
- spin_unlock_irq(&uap->port.lock);
+ uart_unlock_and_check_sysrq(&uap->port);
/*
* Do this check after we picked the DMA chars so we don't
* get some IRQ immediately from RX.
*/
if (ret) {
- dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
- "fall back to interrupt mode\n");
+ dev_dbg(uap->port.dev,
+ "could not retrigger RX DMA job fall back to interrupt mode\n");
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
}
@@ -1052,6 +1031,9 @@ static void pl011_dma_rx_callback(void *data)
*/
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
+ if (!uap->using_rx_dma)
+ return;
+
/* FIXME. Just disable the DMA enable */
uap->dmacr &= ~UART011_RXDMAE;
pl011_write(uap->dmacr, uap, REG_DMACR);
@@ -1064,24 +1046,24 @@ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
*/
static void pl011_dma_rx_poll(struct timer_list *t)
{
- struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
+ struct uart_amba_port *uap = timer_container_of(uap, t, dmarx.timer);
struct tty_port *port = &uap->port.state->port;
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_chan *rxchan = uap->dmarx.chan;
- unsigned long flags = 0;
+ unsigned long flags;
unsigned int dmataken = 0;
unsigned int size = 0;
- struct pl011_sgbuf *sgbuf;
+ struct pl011_dmabuf *dbuf;
int dma_count;
struct dma_tx_state state;
- sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
if (likely(state.residue < dmarx->last_residue)) {
- dmataken = sgbuf->sg.length - dmarx->last_residue;
+ dmataken = dbuf->len - dmarx->last_residue;
size = dmarx->last_residue - state.residue;
- dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
- size);
+ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
+ size);
if (dma_count == size)
dmarx->last_residue = state.residue;
dmarx->last_jiffies = jiffies;
@@ -1094,19 +1076,18 @@ static void pl011_dma_rx_poll(struct timer_list *t)
*/
if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
> uap->dmarx.poll_timeout) {
-
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
pl011_dma_rx_stop(uap);
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
uap->dmarx.running = false;
dmaengine_terminate_all(rxchan);
- del_timer(&uap->dmarx.timer);
+ timer_delete(&uap->dmarx.timer);
} else {
mod_timer(&uap->dmarx.timer,
- jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
+ jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
}
}
@@ -1122,12 +1103,11 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
if (!uap->dmatx.buf) {
- dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
uap->port.fifosize = uap->fifosize;
return;
}
- sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
+ uap->dmatx.len = PL011_DMA_BUFFER_SIZE;
/* The DMA buffer is now the FIFO the TTY subsystem can use */
uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
@@ -1137,21 +1117,21 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
goto skip_rx;
/* Allocate and map DMA RX buffers */
- ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
- DMA_FROM_DEVICE);
+ ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
+ DMA_FROM_DEVICE);
if (ret) {
dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
"RX buffer A", ret);
goto skip_rx;
}
- ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
- DMA_FROM_DEVICE);
+ ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
+ DMA_FROM_DEVICE);
if (ret) {
dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
"RX buffer B", ret);
- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
- DMA_FROM_DEVICE);
+ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
+ DMA_FROM_DEVICE);
goto skip_rx;
}
@@ -1173,13 +1153,12 @@ skip_rx:
if (uap->using_rx_dma) {
if (pl011_dma_rx_trigger_dma(uap))
- dev_dbg(uap->port.dev, "could not trigger initial "
- "RX DMA job, fall back to interrupt mode\n");
+ dev_dbg(uap->port.dev,
+ "could not trigger initial RX DMA job, fall back to interrupt mode\n");
if (uap->dmarx.poll_rate) {
timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
mod_timer(&uap->dmarx.timer,
- jiffies +
- msecs_to_jiffies(uap->dmarx.poll_rate));
+ jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
uap->dmarx.last_jiffies = jiffies;
}
@@ -1195,17 +1174,18 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
cpu_relax();
- spin_lock_irq(&uap->port.lock);
+ uart_port_lock_irq(&uap->port);
uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
pl011_write(uap->dmacr, uap, REG_DMACR);
- spin_unlock_irq(&uap->port.lock);
+ uart_port_unlock_irq(&uap->port);
if (uap->using_tx_dma) {
/* In theory, this should already be done by pl011_dma_flush_buffer */
dmaengine_terminate_all(uap->dmatx.chan);
if (uap->dmatx.queued) {
- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
- DMA_TO_DEVICE);
+ dma_unmap_single(uap->dmatx.chan->device->dev,
+ uap->dmatx.dma, uap->dmatx.len,
+ DMA_TO_DEVICE);
uap->dmatx.queued = false;
}
@@ -1216,10 +1196,10 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
if (uap->using_rx_dma) {
dmaengine_terminate_all(uap->dmarx.chan);
/* Clean up the RX DMA */
- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
+ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
if (uap->dmarx.poll_rate)
- del_timer_sync(&uap->dmarx.timer);
+ timer_delete_sync(&uap->dmarx.timer);
uap->using_rx_dma = false;
}
}
@@ -1236,10 +1216,6 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
#else
/* Blank functions if the DMA engine is not available */
-static inline void pl011_dma_probe(struct uart_amba_port *uap)
-{
-}
-
static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}
@@ -1292,14 +1268,66 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
#define pl011_dma_flush_buffer NULL
#endif
+static void pl011_rs485_tx_stop(struct uart_amba_port *uap)
+{
+ struct uart_port *port = &uap->port;
+ u32 cr;
+
+ if (uap->rs485_tx_state == SEND)
+ uap->rs485_tx_state = WAIT_AFTER_SEND;
+
+ if (uap->rs485_tx_state == WAIT_AFTER_SEND) {
+ /* Schedule hrtimer if tx queue not empty */
+ if (!pl011_tx_empty(port)) {
+ hrtimer_start(&uap->trigger_stop_tx,
+ uap->rs485_tx_drain_interval,
+ HRTIMER_MODE_REL);
+ return;
+ }
+ if (port->rs485.delay_rts_after_send > 0) {
+ hrtimer_start(&uap->trigger_stop_tx,
+ ms_to_ktime(port->rs485.delay_rts_after_send),
+ HRTIMER_MODE_REL);
+ return;
+ }
+ /* Continue without any delay */
+ } else if (uap->rs485_tx_state == WAIT_AFTER_RTS) {
+ hrtimer_try_to_cancel(&uap->trigger_start_tx);
+ }
+
+ cr = pl011_read(uap, REG_CR);
+
+ if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
+ cr &= ~UART011_CR_RTS;
+ else
+ cr |= UART011_CR_RTS;
+
+ /* Disable the transmitter and reenable the transceiver */
+ cr &= ~UART011_CR_TXE;
+ cr |= UART011_CR_RXE;
+ pl011_write(cr, uap, REG_CR);
+
+ uap->rs485_tx_state = OFF;
+}
+
static void pl011_stop_tx(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
+ if (port->rs485.flags & SER_RS485_ENABLED &&
+ uap->rs485_tx_state == WAIT_AFTER_RTS) {
+ pl011_rs485_tx_stop(uap);
+ return;
+ }
+
uap->im &= ~UART011_TXIM;
pl011_write(uap->im, uap, REG_IMSC);
pl011_dma_tx_stop(uap);
+
+ if (port->rs485.flags & SER_RS485_ENABLED &&
+ uap->rs485_tx_state != OFF)
+ pl011_rs485_tx_stop(uap);
}
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);
@@ -1313,33 +1341,116 @@ static void pl011_start_tx_pio(struct uart_amba_port *uap)
}
}
+static void pl011_rs485_tx_start(struct uart_amba_port *uap)
+{
+ struct uart_port *port = &uap->port;
+ u32 cr;
+
+ if (uap->rs485_tx_state == WAIT_AFTER_RTS) {
+ uap->rs485_tx_state = SEND;
+ return;
+ }
+ if (uap->rs485_tx_state == WAIT_AFTER_SEND) {
+ hrtimer_try_to_cancel(&uap->trigger_stop_tx);
+ uap->rs485_tx_state = SEND;
+ return;
+ }
+ /* uap->rs485_tx_state == OFF */
+ /* Enable transmitter */
+ cr = pl011_read(uap, REG_CR);
+ cr |= UART011_CR_TXE;
+ /* Disable receiver if half-duplex */
+ if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
+ cr &= ~UART011_CR_RXE;
+
+ if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
+ cr &= ~UART011_CR_RTS;
+ else
+ cr |= UART011_CR_RTS;
+
+ pl011_write(cr, uap, REG_CR);
+
+ if (port->rs485.delay_rts_before_send > 0) {
+ uap->rs485_tx_state = WAIT_AFTER_RTS;
+ hrtimer_start(&uap->trigger_start_tx,
+ ms_to_ktime(port->rs485.delay_rts_before_send),
+ HRTIMER_MODE_REL);
+ } else {
+ uap->rs485_tx_state = SEND;
+ }
+}
+
static void pl011_start_tx(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
+ if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
+ uap->rs485_tx_state != SEND) {
+ pl011_rs485_tx_start(uap);
+ if (uap->rs485_tx_state == WAIT_AFTER_RTS)
+ return;
+ }
+
if (!pl011_dma_tx_start(uap))
pl011_start_tx_pio(uap);
}
+static enum hrtimer_restart pl011_trigger_start_tx(struct hrtimer *t)
+{
+ struct uart_amba_port *uap =
+ container_of(t, struct uart_amba_port, trigger_start_tx);
+ unsigned long flags;
+
+ uart_port_lock_irqsave(&uap->port, &flags);
+ if (uap->rs485_tx_state == WAIT_AFTER_RTS)
+ pl011_start_tx(&uap->port);
+ uart_port_unlock_irqrestore(&uap->port, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart pl011_trigger_stop_tx(struct hrtimer *t)
+{
+ struct uart_amba_port *uap =
+ container_of(t, struct uart_amba_port, trigger_stop_tx);
+ unsigned long flags;
+
+ uart_port_lock_irqsave(&uap->port, &flags);
+ if (uap->rs485_tx_state == WAIT_AFTER_SEND)
+ pl011_rs485_tx_stop(uap);
+ uart_port_unlock_irqrestore(&uap->port, flags);
+
+ return HRTIMER_NORESTART;
+}
+
static void pl011_stop_rx(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
- uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
- UART011_PEIM|UART011_BEIM|UART011_OEIM);
+ uap->im &= ~(UART011_RXIM | UART011_RTIM | UART011_FEIM |
+ UART011_PEIM | UART011_BEIM | UART011_OEIM);
pl011_write(uap->im, uap, REG_IMSC);
pl011_dma_rx_stop(uap);
}
+static void pl011_throttle_rx(struct uart_port *port)
+{
+ unsigned long flags;
+
+ uart_port_lock_irqsave(port, &flags);
+ pl011_stop_rx(port);
+ uart_port_unlock_irqrestore(port, flags);
+}
+
static void pl011_enable_ms(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
- uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
+ uap->im |= UART011_RIMIM | UART011_CTSMIM | UART011_DCDMIM | UART011_DSRMIM;
pl011_write(uap->im, uap, REG_IMSC);
}
@@ -1349,7 +1460,7 @@ __acquires(&uap->port.lock)
{
pl011_fifo_to_tty(uap);
- spin_unlock(&uap->port.lock);
+ uart_port_unlock(&uap->port);
tty_flip_buffer_push(&uap->port.state->port);
/*
* If we were temporarily out of DMA mode for a while,
@@ -1357,8 +1468,8 @@ __acquires(&uap->port.lock)
*/
if (pl011_dma_rx_available(uap)) {
if (pl011_dma_rx_trigger_dma(uap)) {
- dev_dbg(uap->port.dev, "could not trigger RX DMA job "
- "fall back to interrupt mode again\n");
+ dev_dbg(uap->port.dev,
+ "could not trigger RX DMA job fall back to interrupt mode again\n");
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
} else {
@@ -1368,13 +1479,12 @@ __acquires(&uap->port.lock)
uap->dmarx.last_jiffies = jiffies;
uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
mod_timer(&uap->dmarx.timer,
- jiffies +
- msecs_to_jiffies(uap->dmarx.poll_rate));
+ jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
}
#endif
}
}
- spin_lock(&uap->port.lock);
+ uart_port_lock(&uap->port);
}
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
@@ -1393,7 +1503,7 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
/* Returns true if tx interrupts have to be (kept) enabled */
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
- struct circ_buf *xmit = &uap->port.state->xmit;
+ struct tty_port *tport = &uap->port.state->port;
int count = uap->fifosize >> 1;
if (uap->port.x_char) {
@@ -1402,7 +1512,7 @@ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
uap->port.x_char = 0;
--count;
}
- if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
+ if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(&uap->port)) {
pl011_stop_tx(&uap->port);
return false;
}
@@ -1411,20 +1521,25 @@ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
if (pl011_dma_tx_irq(uap))
return true;
- do {
+ while (1) {
+ unsigned char c;
+
if (likely(from_irq) && count-- == 0)
break;
- if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
+ if (!kfifo_peek(&tport->xmit_fifo, &c))
break;
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- } while (!uart_circ_empty(xmit));
+ if (!pl011_tx_char(uap, c, from_irq))
+ break;
+
+ kfifo_skip(&tport->xmit_fifo);
+ }
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
uart_write_wakeup(&uap->port);
- if (uart_circ_empty(xmit)) {
+ if (kfifo_is_empty(&tport->xmit_fifo)) {
pl011_stop_tx(&uap->port);
return false;
}
@@ -1458,8 +1573,6 @@ static void pl011_modem_status(struct uart_amba_port *uap)
static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
- unsigned int dummy_read;
-
if (!uap->vendor->cts_event_workaround)
return;
@@ -1471,35 +1584,33 @@ static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
* single APB access will incur 2 pclk (133.12 MHz) delay,
* so add 2 dummy reads
*/
- dummy_read = pl011_read(uap, REG_ICR);
- dummy_read = pl011_read(uap, REG_ICR);
+ pl011_read(uap, REG_ICR);
+ pl011_read(uap, REG_ICR);
}
static irqreturn_t pl011_int(int irq, void *dev_id)
{
struct uart_amba_port *uap = dev_id;
- unsigned long flags;
unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
int handled = 0;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock(&uap->port);
status = pl011_read(uap, REG_RIS) & uap->im;
if (status) {
do {
check_apply_cts_event_workaround(uap);
- pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
- UART011_RXIS),
+ pl011_write(status & ~(UART011_TXIS | UART011_RTIS | UART011_RXIS),
uap, REG_ICR);
- if (status & (UART011_RTIS|UART011_RXIS)) {
+ if (status & (UART011_RTIS | UART011_RXIS)) {
if (pl011_dma_rx_running(uap))
pl011_dma_rx_irq(uap);
else
pl011_rx_chars(uap);
}
- if (status & (UART011_DSRMIS|UART011_DCDMIS|
- UART011_CTSMIS|UART011_RIMIS))
+ if (status & (UART011_DSRMIS | UART011_DCDMIS |
+ UART011_CTSMIS | UART011_RIMIS))
pl011_modem_status(uap);
if (status & UART011_TXIS)
pl011_tx_chars(uap, true);
@@ -1512,7 +1623,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
handled = 1;
}
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_unlock_and_check_sysrq(&uap->port);
return IRQ_RETVAL(handled);
}
@@ -1529,6 +1640,12 @@ static unsigned int pl011_tx_empty(struct uart_port *port)
0 : TIOCSER_TEMT;
}
+static void pl011_maybe_set_bit(bool cond, unsigned int *ptr, unsigned int mask)
+{
+ if (cond)
+ *ptr |= mask;
+}
+
static unsigned int pl011_get_mctrl(struct uart_port *port)
{
struct uart_amba_port *uap =
@@ -1536,18 +1653,22 @@ static unsigned int pl011_get_mctrl(struct uart_port *port)
unsigned int result = 0;
unsigned int status = pl011_read(uap, REG_FR);
-#define TIOCMBIT(uartbit, tiocmbit) \
- if (status & uartbit) \
- result |= tiocmbit
+ pl011_maybe_set_bit(status & UART01x_FR_DCD, &result, TIOCM_CAR);
+ pl011_maybe_set_bit(status & uap->vendor->fr_dsr, &result, TIOCM_DSR);
+ pl011_maybe_set_bit(status & uap->vendor->fr_cts, &result, TIOCM_CTS);
+ pl011_maybe_set_bit(status & uap->vendor->fr_ri, &result, TIOCM_RNG);
- TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
- TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
- TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
- TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
-#undef TIOCMBIT
return result;
}
+static void pl011_assign_bit(bool cond, unsigned int *ptr, unsigned int mask)
+{
+ if (cond)
+ *ptr |= mask;
+ else
+ *ptr &= ~mask;
+}
+
static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_amba_port *uap =
@@ -1556,23 +1677,16 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
cr = pl011_read(uap, REG_CR);
-#define TIOCMBIT(tiocmbit, uartbit) \
- if (mctrl & tiocmbit) \
- cr |= uartbit; \
- else \
- cr &= ~uartbit
-
- TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
- TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
- TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
- TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
- TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
+ pl011_assign_bit(mctrl & TIOCM_RTS, &cr, UART011_CR_RTS);
+ pl011_assign_bit(mctrl & TIOCM_DTR, &cr, UART011_CR_DTR);
+ pl011_assign_bit(mctrl & TIOCM_OUT1, &cr, UART011_CR_OUT1);
+ pl011_assign_bit(mctrl & TIOCM_OUT2, &cr, UART011_CR_OUT2);
+ pl011_assign_bit(mctrl & TIOCM_LOOP, &cr, UART011_CR_LBE);
if (port->status & UPSTAT_AUTORTS) {
/* We need to disable auto-RTS if we want to turn RTS off */
- TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
+ pl011_assign_bit(mctrl & TIOCM_RTS, &cr, UART011_CR_RTSEN);
}
-#undef TIOCMBIT
pl011_write(cr, uap, REG_CR);
}
@@ -1584,14 +1698,14 @@ static void pl011_break_ctl(struct uart_port *port, int break_state)
unsigned long flags;
unsigned int lcr_h;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
lcr_h = pl011_read(uap, REG_LCRH_TX);
if (break_state == -1)
lcr_h |= UART01x_LCRH_BRK;
else
lcr_h &= ~UART01x_LCRH_BRK;
pl011_write(lcr_h, uap, REG_LCRH_TX);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
}
#ifdef CONFIG_CONSOLE_POLL
@@ -1638,8 +1752,7 @@ static int pl011_get_poll_char(struct uart_port *port)
return pl011_read(uap, REG_DR);
}
-static void pl011_put_poll_char(struct uart_port *port,
- unsigned char ch)
+static void pl011_put_poll_char(struct uart_port *port, unsigned char ch)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
@@ -1717,7 +1830,7 @@ static int pl011_allocate_irq(struct uart_amba_port *uap)
{
pl011_write(uap->im, uap, REG_IMSC);
- return request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
+ return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap);
}
/*
@@ -1727,9 +1840,10 @@ static int pl011_allocate_irq(struct uart_amba_port *uap)
*/
static void pl011_enable_interrupts(struct uart_amba_port *uap)
{
+ unsigned long flags;
unsigned int i;
- spin_lock_irq(&uap->port.lock);
+ uart_port_lock_irqsave(&uap->port, &flags);
/* Clear out any spuriously appearing RX interrupts */
pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
@@ -1751,7 +1865,30 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap)
if (!pl011_dma_rx_running(uap))
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
- spin_unlock_irq(&uap->port.lock);
+ uart_port_unlock_irqrestore(&uap->port, flags);
+}
+
+static void pl011_unthrottle_rx(struct uart_port *port)
+{
+ struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
+ unsigned long flags;
+
+ uart_port_lock_irqsave(&uap->port, &flags);
+
+ uap->im = UART011_RTIM;
+ if (!pl011_dma_rx_running(uap))
+ uap->im |= UART011_RXIM;
+
+ pl011_write(uap->im, uap, REG_IMSC);
+
+#ifdef CONFIG_DMA_ENGINE
+ if (uap->using_rx_dma) {
+ uap->dmacr |= UART011_RXDMAE;
+ pl011_write(uap->dmacr, uap, REG_DMACR);
+ }
+#endif
+
+ uart_port_unlock_irqrestore(&uap->port, flags);
}
static int pl011_startup(struct uart_port *port)
@@ -1771,14 +1908,18 @@ static int pl011_startup(struct uart_port *port)
pl011_write(uap->vendor->ifls, uap, REG_IFLS);
- spin_lock_irq(&uap->port.lock);
+ uart_port_lock_irq(&uap->port);
+
+ cr = pl011_read(uap, REG_CR);
+ cr &= UART011_CR_RTS | UART011_CR_DTR;
+ cr |= UART01x_CR_UARTEN | UART011_CR_RXE;
+
+ if (!(port->rs485.flags & SER_RS485_ENABLED))
+ cr |= UART011_CR_TXE;
- /* restore RTS and DTR */
- cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
- cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
pl011_write(cr, uap, REG_CR);
- spin_unlock_irq(&uap->port.lock);
+ uart_port_unlock_irq(&uap->port);
/*
* initialise the old status of the modem signals
@@ -1819,14 +1960,13 @@ static int sbsa_uart_startup(struct uart_port *port)
return 0;
}
-static void pl011_shutdown_channel(struct uart_amba_port *uap,
- unsigned int lcrh)
+static void pl011_shutdown_channel(struct uart_amba_port *uap, unsigned int lcrh)
{
- unsigned long val;
+ unsigned long val;
- val = pl011_read(uap, lcrh);
- val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
- pl011_write(val, uap, lcrh);
+ val = pl011_read(uap, lcrh);
+ val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
+ pl011_write(val, uap, lcrh);
}
/*
@@ -1839,13 +1979,12 @@ static void pl011_disable_uart(struct uart_amba_port *uap)
unsigned int cr;
uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
- spin_lock_irq(&uap->port.lock);
+ uart_port_lock_irq(&uap->port);
cr = pl011_read(uap, REG_CR);
- uap->old_cr = cr;
cr &= UART011_CR_RTS | UART011_CR_DTR;
cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
pl011_write(cr, uap, REG_CR);
- spin_unlock_irq(&uap->port.lock);
+ uart_port_unlock_irq(&uap->port);
/*
* disable break condition and fifos
@@ -1857,14 +1996,14 @@ static void pl011_disable_uart(struct uart_amba_port *uap)
static void pl011_disable_interrupts(struct uart_amba_port *uap)
{
- spin_lock_irq(&uap->port.lock);
+ uart_port_lock_irq(&uap->port);
/* mask all interrupts and clear all pending ones */
uap->im = 0;
pl011_write(uap->im, uap, REG_IMSC);
pl011_write(0xffff, uap, REG_ICR);
- spin_unlock_irq(&uap->port.lock);
+ uart_port_unlock_irq(&uap->port);
}
static void pl011_shutdown(struct uart_port *port)
@@ -1876,6 +2015,9 @@ static void pl011_shutdown(struct uart_port *port)
pl011_dma_shutdown(uap);
+ if ((port->rs485.flags & SER_RS485_ENABLED && uap->rs485_tx_state != OFF))
+ pl011_rs485_tx_stop(uap);
+
free_irq(uap->port.irq, uap);
pl011_disable_uart(uap);
@@ -1946,13 +2088,14 @@ pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
unsigned int lcr_h, old_cr;
unsigned long flags;
unsigned int baud, quot, clkdiv;
+ unsigned int bits;
if (uap->vendor->oversampling)
clkdiv = 8;
@@ -1972,7 +2115,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif
- if (baud > port->uartclk/16)
+ if (baud > port->uartclk / 16)
quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
else
quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
@@ -2003,21 +2146,31 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
if (uap->fifosize > 1)
lcr_h |= UART01x_LCRH_FEN;
- spin_lock_irqsave(&port->lock, flags);
+ bits = tty_get_frame_size(termios->c_cflag);
+
+ uart_port_lock_irqsave(port, &flags);
/*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
+ /*
+ * Calculate the approximated time it takes to transmit one character
+ * with the given baud rate. We use this as the poll interval when we
+ * wait for the tx queue to empty.
+ */
+ uap->rs485_tx_drain_interval = ns_to_ktime(DIV_ROUND_UP(bits * NSEC_PER_SEC, baud));
+
pl011_setup_status_masks(port, termios);
if (UART_ENABLE_MS(port, termios->c_cflag))
pl011_enable_ms(port);
- /* first, disable everything */
+ if (port->rs485.flags & SER_RS485_ENABLED)
+ termios->c_cflag &= ~CRTSCTS;
+
old_cr = pl011_read(uap, REG_CR);
- pl011_write(0, uap, REG_CR);
if (termios->c_cflag & CRTSCTS) {
if (old_cr & UART011_CR_RTS)
@@ -2044,9 +2197,9 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
* else we see data corruption.
*/
if (uap->vendor->oversampling) {
- if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
+ if (baud >= 3000000 && baud < 3250000 && quot > 1)
quot -= 1;
- else if ((baud > 3250000) && (quot > 2))
+ else if (baud > 3250000 && quot > 2)
quot -= 2;
}
/* Set baud rate */
@@ -2060,14 +2213,21 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
* ----------^----------^----------^----------^-----
*/
pl011_write_lcr_h(uap, lcr_h);
+
+ /*
+ * Receive was disabled by pl011_disable_uart during shutdown.
+ * Need to re-enable it here so that a tty_driver returned by
+ * tty_find_polling_driver() keeps working after a port shutdown.
+ */
+ old_cr |= UART011_CR_RXE;
pl011_write(old_cr, uap, REG_CR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void
sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
@@ -2080,10 +2240,10 @@ sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
termios->c_cflag &= ~(CMSPAR | CRTSCTS);
termios->c_cflag |= CS8 | CLOCAL;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, CS8, uap->fixed_baud);
pl011_setup_status_masks(port, termios);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *pl011_type(struct uart_port *port)
@@ -2094,31 +2254,12 @@ static const char *pl011_type(struct uart_port *port)
}
/*
- * Release the memory region(s) being used by 'port'
- */
-static void pl011_release_port(struct uart_port *port)
-{
- release_mem_region(port->mapbase, SZ_4K);
-}
-
-/*
- * Request the memory region(s) being used by 'port'
- */
-static int pl011_request_port(struct uart_port *port)
-{
- return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
- != NULL ? 0 : -EBUSY;
-}
-
-/*
* Configure/autoconfigure the port.
*/
static void pl011_config_port(struct uart_port *port, int flags)
{
- if (flags & UART_CONFIG_TYPE) {
+ if (flags & UART_CONFIG_TYPE)
port->type = PORT_AMBA;
- pl011_request_port(port);
- }
}
/*
@@ -2127,15 +2268,39 @@ static void pl011_config_port(struct uart_port *port, int flags)
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
int ret = 0;
+
if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
ret = -EINVAL;
- if (ser->irq < 0 || ser->irq >= nr_irqs)
+ if (ser->irq < 0 || ser->irq >= irq_get_nr_irqs())
ret = -EINVAL;
if (ser->baud_base < 9600)
ret = -EINVAL;
+ if (port->mapbase != (unsigned long)ser->iomem_base)
+ ret = -EINVAL;
return ret;
}
+static int pl011_rs485_config(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485)
+{
+ struct uart_amba_port *uap =
+ container_of(port, struct uart_amba_port, port);
+
+ if (port->rs485.flags & SER_RS485_ENABLED)
+ pl011_rs485_tx_stop(uap);
+
+ /* Make sure auto RTS is disabled */
+ if (rs485->flags & SER_RS485_ENABLED) {
+ u32 cr = pl011_read(uap, REG_CR);
+
+ cr &= ~UART011_CR_RTSEN;
+ pl011_write(cr, uap, REG_CR);
+ port->status &= ~UPSTAT_AUTORTS;
+ }
+
+ return 0;
+}
+
static const struct uart_ops amba_pl011_pops = {
.tx_empty = pl011_tx_empty,
.set_mctrl = pl011_set_mctrl,
@@ -2143,6 +2308,8 @@ static const struct uart_ops amba_pl011_pops = {
.stop_tx = pl011_stop_tx,
.start_tx = pl011_start_tx,
.stop_rx = pl011_stop_rx,
+ .throttle = pl011_throttle_rx,
+ .unthrottle = pl011_unthrottle_rx,
.enable_ms = pl011_enable_ms,
.break_ctl = pl011_break_ctl,
.startup = pl011_startup,
@@ -2150,8 +2317,6 @@ static const struct uart_ops amba_pl011_pops = {
.flush_buffer = pl011_dma_flush_buffer,
.set_termios = pl011_set_termios,
.type = pl011_type,
- .release_port = pl011_release_port,
- .request_port = pl011_request_port,
.config_port = pl011_config_port,
.verify_port = pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
@@ -2181,8 +2346,6 @@ static const struct uart_ops sbsa_uart_pops = {
.shutdown = sbsa_uart_shutdown,
.set_termios = sbsa_uart_set_termios,
.type = pl011_type,
- .release_port = pl011_release_port,
- .request_port = pl011_request_port,
.config_port = pl011_config_port,
.verify_port = pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
@@ -2196,7 +2359,7 @@ static struct uart_amba_port *amba_ports[UART_NR];
#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
-static void pl011_console_putchar(struct uart_port *port, int ch)
+static void pl011_console_putchar(struct uart_port *port, unsigned char ch)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
@@ -2204,92 +2367,43 @@ static void pl011_console_putchar(struct uart_port *port, int ch)
while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
cpu_relax();
pl011_write(ch, uap, REG_DR);
+ uap->console_line_ended = (ch == '\n');
}
-static void
-pl011_console_write(struct console *co, const char *s, unsigned int count)
+static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
+ int *parity, int *bits)
{
- struct uart_amba_port *uap = amba_ports[co->index];
- unsigned int old_cr = 0, new_cr;
- unsigned long flags;
- int locked = 1;
+ unsigned int lcr_h, ibrd, fbrd;
- clk_enable(uap->clk);
+ if (!(pl011_read(uap, REG_CR) & UART01x_CR_UARTEN))
+ return;
- local_irq_save(flags);
- if (uap->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = spin_trylock(&uap->port.lock);
- else
- spin_lock(&uap->port.lock);
+ lcr_h = pl011_read(uap, REG_LCRH_TX);
- /*
- * First save the CR then disable the interrupts
- */
- if (!uap->vendor->always_enabled) {
- old_cr = pl011_read(uap, REG_CR);
- new_cr = old_cr & ~UART011_CR_CTSEN;
- new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
- pl011_write(new_cr, uap, REG_CR);
+ *parity = 'n';
+ if (lcr_h & UART01x_LCRH_PEN) {
+ if (lcr_h & UART01x_LCRH_EPS)
+ *parity = 'e';
+ else
+ *parity = 'o';
}
- uart_console_write(&uap->port, s, count, pl011_console_putchar);
-
- /*
- * Finally, wait for transmitter to become empty and restore the
- * TCR. Allow feature register bits to be inverted to work around
- * errata.
- */
- while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
- & uap->vendor->fr_busy)
- cpu_relax();
- if (!uap->vendor->always_enabled)
- pl011_write(old_cr, uap, REG_CR);
-
- if (locked)
- spin_unlock(&uap->port.lock);
- local_irq_restore(flags);
-
- clk_disable(uap->clk);
-}
-
-static void __init
-pl011_console_get_options(struct uart_amba_port *uap, int *baud,
- int *parity, int *bits)
-{
- if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
- unsigned int lcr_h, ibrd, fbrd;
-
- lcr_h = pl011_read(uap, REG_LCRH_TX);
-
- *parity = 'n';
- if (lcr_h & UART01x_LCRH_PEN) {
- if (lcr_h & UART01x_LCRH_EPS)
- *parity = 'e';
- else
- *parity = 'o';
- }
-
- if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
- *bits = 7;
- else
- *bits = 8;
+ if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
+ *bits = 7;
+ else
+ *bits = 8;
- ibrd = pl011_read(uap, REG_IBRD);
- fbrd = pl011_read(uap, REG_FBRD);
+ ibrd = pl011_read(uap, REG_IBRD);
+ fbrd = pl011_read(uap, REG_FBRD);
- *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
+ *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
- if (uap->vendor->oversampling) {
- if (pl011_read(uap, REG_CR)
- & ST_UART011_CR_OVSFACT)
- *baud *= 2;
- }
- }
+ if (uap->vendor->oversampling &&
+ (pl011_read(uap, REG_CR) & ST_UART011_CR_OVSFACT))
+ *baud *= 2;
}
-static int __init pl011_console_setup(struct console *co, char *options)
+static int pl011_console_setup(struct console *co, char *options)
{
struct uart_amba_port *uap;
int baud = 38400;
@@ -2316,6 +2430,8 @@ static int __init pl011_console_setup(struct console *co, char *options)
if (ret)
return ret;
+ uap->console_line_ended = true;
+
if (dev_get_platdata(uap->port.dev)) {
struct amba_pl011_data *plat;
@@ -2357,10 +2473,10 @@ static int __init pl011_console_setup(struct console *co, char *options)
*
* Returns 0 if console matches; otherwise non-zero to use default matching
*/
-static int __init pl011_console_match(struct console *co, char *name, int idx,
- char *options)
+static int pl011_console_match(struct console *co, char *name, int idx,
+ char *options)
{
- unsigned char iotype;
+ enum uart_iotype iotype;
resource_size_t addr;
int i;
@@ -2392,28 +2508,119 @@ static int __init pl011_console_match(struct console *co, char *name, int idx,
continue;
co->index = i;
- port->cons = co;
+ uart_port_set_cons(port, co);
return pl011_console_setup(co, options);
}
return -ENODEV;
}
+static void
+pl011_console_write_atomic(struct console *co, struct nbcon_write_context *wctxt)
+{
+ struct uart_amba_port *uap = amba_ports[co->index];
+ unsigned int old_cr = 0;
+
+ if (!nbcon_enter_unsafe(wctxt))
+ return;
+
+ clk_enable(uap->clk);
+
+ if (!uap->vendor->always_enabled) {
+ old_cr = pl011_read(uap, REG_CR);
+ pl011_write((old_cr & ~UART011_CR_CTSEN) | (UART01x_CR_UARTEN | UART011_CR_TXE),
+ uap, REG_CR);
+ }
+
+ if (!uap->console_line_ended)
+ uart_console_write(&uap->port, "\n", 1, pl011_console_putchar);
+ uart_console_write(&uap->port, wctxt->outbuf, wctxt->len, pl011_console_putchar);
+
+ while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) & uap->vendor->fr_busy)
+ cpu_relax();
+
+ if (!uap->vendor->always_enabled)
+ pl011_write(old_cr, uap, REG_CR);
+
+ clk_disable(uap->clk);
+
+ nbcon_exit_unsafe(wctxt);
+}
+
+static void
+pl011_console_write_thread(struct console *co, struct nbcon_write_context *wctxt)
+{
+ struct uart_amba_port *uap = amba_ports[co->index];
+ unsigned int old_cr = 0;
+
+ if (!nbcon_enter_unsafe(wctxt))
+ return;
+
+ clk_enable(uap->clk);
+
+ if (!uap->vendor->always_enabled) {
+ old_cr = pl011_read(uap, REG_CR);
+ pl011_write((old_cr & ~UART011_CR_CTSEN) | (UART01x_CR_UARTEN | UART011_CR_TXE),
+ uap, REG_CR);
+ }
+
+ if (nbcon_exit_unsafe(wctxt)) {
+ int i;
+ unsigned int len = READ_ONCE(wctxt->len);
+
+ for (i = 0; i < len; i++) {
+ if (!nbcon_enter_unsafe(wctxt))
+ break;
+ uart_console_write(&uap->port, wctxt->outbuf + i, 1, pl011_console_putchar);
+ if (!nbcon_exit_unsafe(wctxt))
+ break;
+ }
+ }
+
+ while (!nbcon_enter_unsafe(wctxt))
+ nbcon_reacquire_nobuf(wctxt);
+
+ while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) & uap->vendor->fr_busy)
+ cpu_relax();
+
+ if (!uap->vendor->always_enabled)
+ pl011_write(old_cr, uap, REG_CR);
+
+ clk_disable(uap->clk);
+
+ nbcon_exit_unsafe(wctxt);
+}
+
+static void
+pl011_console_device_lock(struct console *co, unsigned long *flags)
+{
+ __uart_port_lock_irqsave(&amba_ports[co->index]->port, flags);
+}
+
+static void
+pl011_console_device_unlock(struct console *co, unsigned long flags)
+{
+ __uart_port_unlock_irqrestore(&amba_ports[co->index]->port, flags);
+}
+
static struct uart_driver amba_reg;
static struct console amba_console = {
.name = "ttyAMA",
- .write = pl011_console_write,
.device = uart_console_device,
.setup = pl011_console_setup,
.match = pl011_console_match,
- .flags = CON_PRINTBUFFER | CON_ANYTIME,
+ .write_atomic = pl011_console_write_atomic,
+ .write_thread = pl011_console_write_thread,
+ .device_lock = pl011_console_device_lock,
+ .device_unlock = pl011_console_device_unlock,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_NBCON,
.index = -1,
.data = &amba_reg,
};
#define AMBA_CONSOLE (&amba_console)
-static void qdf2400_e44_putc(struct uart_port *port, int c)
+static void qdf2400_e44_putc(struct uart_port *port, unsigned char c)
{
while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
cpu_relax();
@@ -2422,14 +2629,14 @@ static void qdf2400_e44_putc(struct uart_port *port, int c)
cpu_relax();
}
-static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned n)
+static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, qdf2400_e44_putc);
}
-static void pl011_putc(struct uart_port *port, int c)
+static void pl011_putc(struct uart_port *port, unsigned char c)
{
while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
cpu_relax();
@@ -2441,13 +2648,44 @@ static void pl011_putc(struct uart_port *port, int c)
cpu_relax();
}
-static void pl011_early_write(struct console *con, const char *s, unsigned n)
+static void pl011_early_write(struct console *con, const char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, pl011_putc);
}
+#ifdef CONFIG_CONSOLE_POLL
+static int pl011_getc(struct uart_port *port)
+{
+ if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE)
+ return NO_POLL_CHAR;
+
+ if (port->iotype == UPIO_MEM32)
+ return readl(port->membase + UART01x_DR);
+ else
+ return readb(port->membase + UART01x_DR);
+}
+
+static int pl011_early_read(struct console *con, char *s, unsigned int n)
+{
+ struct earlycon_device *dev = con->data;
+ int ch, num_read = 0;
+
+ while (num_read < n) {
+ ch = pl011_getc(&dev->port);
+ if (ch == NO_POLL_CHAR)
+ break;
+
+ s[num_read++] = ch;
+ }
+
+ return num_read;
+}
+#else
+#define pl011_early_read NULL
+#endif
+
/*
* On non-ACPI systems, earlycon is enabled by specifying
* "earlycon=pl011,<address>" on the kernel command line.
@@ -2467,10 +2705,13 @@ static int __init pl011_early_console_setup(struct earlycon_device *device,
return -ENODEV;
device->con->write = pl011_early_write;
+ device->con->read = pl011_early_read;
return 0;
}
+
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
+
OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
/*
@@ -2493,6 +2734,7 @@ qdf2400_e44_early_console_setup(struct earlycon_device *device,
device->con->write = qdf2400_e44_early_write;
return 0;
}
+
EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);
#else
@@ -2512,8 +2754,8 @@ static struct uart_driver amba_reg = {
static int pl011_probe_dt_alias(int index, struct device *dev)
{
struct device_node *np;
- static bool seen_dev_with_alias = false;
- static bool seen_dev_without_alias = false;
+ static bool seen_dev_with_alias;
+ static bool seen_dev_without_alias;
int ret = index;
if (!IS_ENABLED(CONFIG_OF))
@@ -2529,7 +2771,7 @@ static int pl011_probe_dt_alias(int index, struct device *dev)
ret = index;
} else {
seen_dev_with_alias = true;
- if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
+ if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret]) {
dev_warn(dev, "requested serial port %d not available.\n", ret);
ret = index;
}
@@ -2563,7 +2805,7 @@ static int pl011_find_free_port(void)
int i;
for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
- if (amba_ports[i] == NULL)
+ if (!amba_ports[i])
return i;
return -EBUSY;
@@ -2573,6 +2815,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
struct resource *mmiobase, int index)
{
void __iomem *base;
+ int ret;
base = devm_ioremap_resource(dev, mmiobase);
if (IS_ERR(base))
@@ -2580,14 +2823,18 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
index = pl011_probe_dt_alias(index, dev);
- uap->old_cr = 0;
uap->port.dev = dev;
uap->port.mapbase = mmiobase->start;
uap->port.membase = base;
uap->port.fifosize = uap->fifosize;
+ uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE);
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = index;
+ ret = uart_get_rs485_mode(&uap->port);
+ if (ret)
+ return ret;
+
amba_ports[index] = uap;
return 0;
@@ -2595,7 +2842,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
static int pl011_register_port(struct uart_amba_port *uap)
{
- int ret;
+ int ret, i;
/* Ensure interrupts from this UART are masked and cleared */
pl011_write(0, uap, REG_IMSC);
@@ -2606,6 +2853,9 @@ static int pl011_register_port(struct uart_amba_port *uap)
if (ret < 0) {
dev_err(uap->port.dev,
"Failed to register AMBA-PL011 driver\n");
+ for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
+ if (amba_ports[i] == uap)
+ amba_ports[i] = NULL;
return ret;
}
}
@@ -2617,11 +2867,19 @@ static int pl011_register_port(struct uart_amba_port *uap)
return ret;
}
+static const struct serial_rs485 pl011_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
struct vendor_data *vendor = id->data;
int portnr, ret;
+ u32 val;
portnr = pl011_find_free_port();
if (portnr < 0)
@@ -2642,9 +2900,29 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
uap->port.irq = dev->irq[0];
uap->port.ops = &amba_pl011_pops;
-
+ uap->port.rs485_config = pl011_rs485_config;
+ uap->port.rs485_supported = pl011_rs485_supported;
snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
+ if (device_property_read_u32(&dev->dev, "reg-io-width", &val) == 0) {
+ switch (val) {
+ case 1:
+ uap->port.iotype = UPIO_MEM;
+ break;
+ case 4:
+ uap->port.iotype = UPIO_MEM32;
+ break;
+ default:
+ dev_warn(&dev->dev, "unsupported reg-io-width (%d)\n",
+ val);
+ return -EINVAL;
+ }
+ }
+ hrtimer_setup(&uap->trigger_start_tx, pl011_trigger_start_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&uap->trigger_stop_tx, pl011_trigger_stop_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+
ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
if (ret)
return ret;
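The optional reg-io-width firmware property (1 or 4) overrides the vendor default access width by switching port.iotype between UPIO_MEM and UPIO_MEM32; the driver's register accessors then pick a 16- or 32-bit MMIO access accordingly. A sketch of that selection, assuming helpers along the lines of the driver's own (shown for illustration; the in-file helper may differ in detail):

	static unsigned int example_pl011_read(const struct uart_amba_port *uap,
					       unsigned int reg)
	{
		void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

		return (uap->port.iotype == UPIO_MEM32) ?
			readl_relaxed(addr) : readw_relaxed(addr);
	}

The two hrtimers set up just above (trigger_start_tx/trigger_stop_tx) provide the deferred TX start and stop used to honour the RS485 RTS turnaround delays advertised in pl011_rs485_supported.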
@@ -2654,13 +2932,12 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
return pl011_register_port(uap);
}
-static int pl011_remove(struct amba_device *dev)
+static void pl011_remove(struct amba_device *dev)
{
struct uart_amba_port *uap = amba_get_drvdata(dev);
uart_remove_one_port(&amba_reg, &uap->port);
pl011_unregister_port(uap);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -2687,6 +2964,22 @@ static int pl011_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
+#ifdef CONFIG_ACPI_SPCR_TABLE
+static void qpdf2400_erratum44_workaround(struct device *dev,
+ struct uart_amba_port *uap)
+{
+ if (!qdf2400_e44_present)
+ return;
+
+ dev_info(dev, "working around QDF2400 SoC erratum 44\n");
+ uap->vendor = &vendor_qdt_qdf2400_e44;
+}
+#else
+static void qpdf2400_erratum44_workaround(struct device *dev,
+ struct uart_amba_port *uap)
+{ /* empty */ }
+#endif
+
static int sbsa_uart_probe(struct platform_device *pdev)
{
struct uart_amba_port *uap;
@@ -2718,20 +3011,12 @@ static int sbsa_uart_probe(struct platform_device *pdev)
return -ENOMEM;
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "cannot obtain irq\n");
+ if (ret < 0)
return ret;
- }
uap->port.irq = ret;
-#ifdef CONFIG_ACPI_SPCR_TABLE
- if (qdf2400_e44_present) {
- dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
- uap->vendor = &vendor_qdt_qdf2400_e44;
- } else
-#endif
- uap->vendor = &vendor_sbsa;
+ uap->vendor = &vendor_sbsa;
+ qpdf2400_erratum44_workaround(&pdev->dev, uap);
uap->reg_offset = uap->vendor->reg_offset;
uap->fifosize = 32;
@@ -2752,13 +3037,12 @@ static int sbsa_uart_probe(struct platform_device *pdev)
return pl011_register_port(uap);
}
-static int sbsa_uart_remove(struct platform_device *pdev)
+static void sbsa_uart_remove(struct platform_device *pdev)
{
struct uart_amba_port *uap = platform_get_drvdata(pdev);
uart_remove_one_port(&amba_reg, &uap->port);
pl011_unregister_port(uap);
- return 0;
}
static const struct of_device_id sbsa_uart_of_match[] = {
@@ -2769,6 +3053,7 @@ MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
static const struct acpi_device_id sbsa_uart_acpi_match[] = {
{ "ARMH0011", 0 },
+ { "ARMHB000", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
@@ -2778,8 +3063,9 @@ static struct platform_driver arm_sbsa_uart_platform_driver = {
.remove = sbsa_uart_remove,
.driver = {
.name = "sbsa-uart",
- .of_match_table = of_match_ptr(sbsa_uart_of_match),
- .acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
+ .pm = &pl011_dev_pm_ops,
+ .of_match_table = sbsa_uart_of_match,
+ .acpi_match_table = sbsa_uart_acpi_match,
.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
},
};
@@ -2795,11 +3081,6 @@ static const struct amba_id pl011_ids[] = {
.mask = 0x00ffffff,
.data = &vendor_st,
},
- {
- .id = AMBA_LINUX_ID(0x00, 0x1, 0xffe),
- .mask = 0x00ffffff,
- .data = &vendor_zte,
- },
{ 0, 0 },
};
@@ -2818,7 +3099,7 @@ static struct amba_driver pl011_driver = {
static int __init pl011_init(void)
{
- printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
+ pr_info("Serial: AMBA PL011 UART driver\n");
if (platform_driver_register(&arm_sbsa_uart_platform_driver))
pr_warn("could not register SBSA UART platform driver\n");