Diffstat (limited to 'drivers/net/wan/ixp4xx_hss.c')
-rw-r--r--  drivers/net/wan/ixp4xx_hss.c | 503
1 file changed, 314 insertions(+), 189 deletions(-)
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index e7bbdb7af53a..720c5dc889ea 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel IXP4xx HSS (synchronous serial port) driver for Linux
*
* Copyright (C) 2007-2008 Krzysztof HaƂasa <khc@pm.waw.pl>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -19,11 +16,21 @@
#include <linux/hdlc.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
-#include <mach/npe.h>
-#include <mach/qmgr.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
+#include <linux/soc/ixp4xx/cpu.h>
+
+/* This is the clock frequency all known IXP4xx platforms use. If more
+ * frequencies are ever needed, we will have to migrate to the clock
+ * framework.
+ */
+#define IXP4XX_TIMER_FREQ 66666000
#define DEBUG_DESC 0
#define DEBUG_RX 0
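
The comment above IXP4XX_TIMER_FREQ notes that a move to the common clock framework would be needed if other reference frequencies ever show up. Purely as an illustration (not part of the patch), such a lookup could be sketched as below; the "ref" clock name and the helper itself are hypothetical and not defined by any binding:

#include <linux/clk.h>
#include <linux/device.h>

/* Hypothetical helper: take the reference rate from the clock framework
 * when a clock is described, otherwise fall back to the fixed 66.666 MHz
 * value used by this driver.
 */
static unsigned long hss_get_ref_rate(struct device *dev)
{
	struct clk *clk;

	clk = devm_clk_get_enabled(dev, "ref");	/* "ref" is an assumed name */
	if (IS_ERR(clk))
		return IXP4XX_TIMER_FREQ;

	return clk_get_rate(clk);
}
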
@@ -51,7 +58,6 @@
#define NAPI_WEIGHT 16
/* Queue IDs */
-#define HSS0_CHL_RXTRIG_QUEUE 12 /* orig size = 32 dwords */
#define HSS0_PKT_RX_QUEUE 13 /* orig size = 32 dwords */
#define HSS0_PKT_TX0_QUEUE 14 /* orig size = 16 dwords */
#define HSS0_PKT_TX1_QUEUE 15
@@ -63,7 +69,6 @@
#define HSS0_PKT_RXFREE3_QUEUE 21
#define HSS0_PKT_TXDONE_QUEUE 22 /* orig size = 64 dwords */
-#define HSS1_CHL_RXTRIG_QUEUE 10
#define HSS1_PKT_RX_QUEUE 0
#define HSS1_PKT_TX0_QUEUE 5
#define HSS1_PKT_TX1_QUEUE 6
@@ -85,7 +90,6 @@
#define PKT_HDLC_CRC_32 0x2 /* default = CRC-16 */
#define PKT_HDLC_MSB_ENDIAN 0x4 /* default = LE */
-
/* hss_config, PCRs */
/* Frame sync sampling, default = active low */
#define PCR_FRM_SYNC_ACTIVE_HIGH 0x40000000
@@ -152,26 +156,24 @@
/* HSS number, default = 0 (first) */
#define CCR_SECOND_HSS 0x01000000
-
/* hss_config, clkCR: main:10, num:10, denom:12 */
-#define CLK42X_SPEED_EXP ((0x3FF << 22) | ( 2 << 12) | 15) /*65 KHz*/
-
-#define CLK42X_SPEED_512KHZ (( 130 << 22) | ( 2 << 12) | 15)
-#define CLK42X_SPEED_1536KHZ (( 43 << 22) | ( 18 << 12) | 47)
-#define CLK42X_SPEED_1544KHZ (( 43 << 22) | ( 33 << 12) | 192)
-#define CLK42X_SPEED_2048KHZ (( 32 << 22) | ( 34 << 12) | 63)
-#define CLK42X_SPEED_4096KHZ (( 16 << 22) | ( 34 << 12) | 127)
-#define CLK42X_SPEED_8192KHZ (( 8 << 22) | ( 34 << 12) | 255)
-
-#define CLK46X_SPEED_512KHZ (( 130 << 22) | ( 24 << 12) | 127)
-#define CLK46X_SPEED_1536KHZ (( 43 << 22) | (152 << 12) | 383)
-#define CLK46X_SPEED_1544KHZ (( 43 << 22) | ( 66 << 12) | 385)
-#define CLK46X_SPEED_2048KHZ (( 32 << 22) | (280 << 12) | 511)
-#define CLK46X_SPEED_4096KHZ (( 16 << 22) | (280 << 12) | 1023)
-#define CLK46X_SPEED_8192KHZ (( 8 << 22) | (280 << 12) | 2047)
-
-/*
- * HSS_CONFIG_CLOCK_CR register consists of 3 parts:
+#define CLK42X_SPEED_EXP ((0x3FF << 22) | (2 << 12) | 15) /*65 KHz*/
+
+#define CLK42X_SPEED_512KHZ ((130 << 22) | (2 << 12) | 15)
+#define CLK42X_SPEED_1536KHZ ((43 << 22) | (18 << 12) | 47)
+#define CLK42X_SPEED_1544KHZ ((43 << 22) | (33 << 12) | 192)
+#define CLK42X_SPEED_2048KHZ ((32 << 22) | (34 << 12) | 63)
+#define CLK42X_SPEED_4096KHZ ((16 << 22) | (34 << 12) | 127)
+#define CLK42X_SPEED_8192KHZ ((8 << 22) | (34 << 12) | 255)
+
+#define CLK46X_SPEED_512KHZ ((130 << 22) | (24 << 12) | 127)
+#define CLK46X_SPEED_1536KHZ ((43 << 22) | (152 << 12) | 383)
+#define CLK46X_SPEED_1544KHZ ((43 << 22) | (66 << 12) | 385)
+#define CLK46X_SPEED_2048KHZ ((32 << 22) | (280 << 12) | 511)
+#define CLK46X_SPEED_4096KHZ ((16 << 22) | (280 << 12) | 1023)
+#define CLK46X_SPEED_8192KHZ ((8 << 22) | (280 << 12) | 2047)
+
+/* HSS_CONFIG_CLOCK_CR register consists of 3 parts:
* A (10 bits), B (10 bits) and C (12 bits).
* IXP42x HSS clock generator operation (verified with an oscilloscope):
* Each clock bit takes 7.5 ns (1 / 133.xx MHz).
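
The A/B/C layout described above can be checked numerically: check_clock() further down in this patch computes rate = timer_freq * (C + 1) / (A * (C + 1) + B + 1). A small standalone program (an illustration only, not part of the patch) reproduces that arithmetic for one of the predefined register values:

#include <stdint.h>
#include <stdio.h>

/* Decode an HSS_CONFIG_CLOCK_CR value: A in bits 31:22, B in bits 21:12,
 * C in bits 11:0, then apply the same formula as check_clock().
 */
static uint32_t clock_cr_rate(uint32_t timer_freq, uint32_t reg)
{
	uint32_t a = (reg >> 22) & 0x3FF;
	uint32_t b = (reg >> 12) & 0x3FF;
	uint32_t c = reg & 0xFFF;

	return (uint32_t)(((uint64_t)timer_freq * (c + 1)) /
			  (a * (c + 1) + b + 1));
}

int main(void)
{
	/* CLK42X_SPEED_2048KHZ = (32 << 22) | (34 << 12) | 63 */
	printf("%u Hz\n", clock_cr_rate(66666000, (32 << 22) | (34 << 12) | 63));
	return 0;
}

With the 66.666 MHz reference this prints about 2048307 Hz, i.e. the 2048 kHz the define is named after.
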
@@ -210,7 +212,6 @@
#define HSS_CONFIG_TX_LUT 0x18 /* channel look-up tables */
#define HSS_CONFIG_RX_LUT 0x38
-
/* NPE command codes */
/* writes the ConfigWord value to the location specified by offset */
#define PORT_CONFIG_WRITE 0x40
@@ -222,7 +223,8 @@
#define PORT_ERROR_READ 0x42
/* triggers the NPE to reset internal status and enable the HssPacketized
- operation for the flow specified by pPipe */
+ * operation for the flow specified by pPipe
+ */
#define PKT_PIPE_FLOW_ENABLE 0x50
#define PKT_PIPE_FLOW_DISABLE 0x51
#define PKT_NUM_PIPES_WRITE 0x52
@@ -237,16 +239,16 @@
#define ERR_HDLC_ALIGN 2 /* HDLC alignment error */
#define ERR_HDLC_FCS 3 /* HDLC Frame Check Sum error */
#define ERR_RXFREE_Q_EMPTY 4 /* RX-free queue became empty while receiving
- this packet (if buf_len < pkt_len) */
+ * this packet (if buf_len < pkt_len)
+ */
#define ERR_HDLC_TOO_LONG 5 /* HDLC frame size too long */
#define ERR_HDLC_ABORT 6 /* abort sequence received */
#define ERR_DISCONNECTING 7 /* disconnect is in progress */
-
#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
-#define free_buffer_irq dev_kfree_skb_irq
+#define free_buffer_irq dev_consume_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
@@ -256,12 +258,22 @@ typedef void buffer_t;
struct port {
struct device *dev;
struct npe *npe;
+ unsigned int txreadyq;
+ unsigned int rxtrigq;
+ unsigned int rxfreeq;
+ unsigned int rxq;
+ unsigned int txq;
+ unsigned int txdoneq;
+ struct gpio_desc *cts;
+ struct gpio_desc *rts;
+ struct gpio_desc *dcd;
+ struct gpio_desc *dtr;
+ struct gpio_desc *clk_internal;
struct net_device *netdev;
struct napi_struct napi;
- struct hss_plat_info *plat;
buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
struct desc *desc_tab; /* coherent */
- u32 desc_tab_phys;
+ dma_addr_t desc_tab_phys;
unsigned int id;
unsigned int clock_type, clock_rate, loopback;
unsigned int initialized, carrier;
@@ -310,7 +322,6 @@ struct desc {
u32 __reserved1[4];
};
-
#define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
(n) * sizeof(struct desc))
#define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
@@ -325,21 +336,13 @@ struct desc {
static int ports_open;
static struct dma_pool *dma_pool;
-static spinlock_t npe_lock;
-
-static const struct {
- int tx, txdone, rx, rxfree;
-}queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
- HSS0_PKT_RXFREE0_QUEUE},
- {HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE,
- HSS1_PKT_RXFREE0_QUEUE},
-};
+static DEFINE_SPINLOCK(npe_lock);
/*****************************************************************************
* utility functions
****************************************************************************/
-static inline struct port* dev_to_port(struct net_device *dev)
+static inline struct port *dev_to_port(struct net_device *dev)
{
return dev_to_hdlc(dev)->priv;
}
@@ -348,6 +351,7 @@ static inline struct port* dev_to_port(struct net_device *dev)
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
int i;
+
for (i = 0; i < cnt; i++)
dest[i] = swab32(src[i]);
}
@@ -357,9 +361,10 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
* HSS access
****************************************************************************/
-static void hss_npe_send(struct port *port, struct msg *msg, const char* what)
+static void hss_npe_send(struct port *port, struct msg *msg, const char *what)
{
- u32 *val = (u32*)msg;
+ u32 *val = (u32 *)msg;
+
if (npe_send_message(port->npe, msg, what)) {
pr_crit("HSS-%i: unable to send command [%08X:%08X] to %s\n",
port->id, val[0], val[1], npe_name(port->npe));
@@ -515,10 +520,12 @@ static int hss_load_firmware(struct port *port)
if (port->initialized)
return 0;
- if (!npe_running(port->npe) &&
- (err = npe_load_firmware(port->npe, npe_name(port->npe),
- port->dev)))
- return err;
+ if (!npe_running(port->npe)) {
+ err = npe_load_firmware(port->npe, npe_name(port->npe),
+ port->dev);
+ if (err)
+ return err;
+ }
/* HDLC mode configuration */
memset(&msg, 0, sizeof(msg));
@@ -569,7 +576,6 @@ static inline void debug_pkt(struct net_device *dev, const char *func,
#endif
}
-
static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
@@ -585,7 +591,8 @@ static inline int queue_get_desc(unsigned int queue, struct port *port,
u32 phys, tab_phys, n_desc;
struct desc *tab;
- if (!(phys = qmgr_get_entry(queue)))
+ phys = qmgr_get_entry(queue);
+ if (!phys)
return -1;
BUG_ON(phys & 0x1F);
@@ -605,10 +612,10 @@ static inline void queue_put_desc(unsigned int queue, u32 phys,
BUG_ON(phys & 0x1F);
qmgr_put_entry(queue, phys);
/* Don't check for queue overflow here, we've allocated sufficient
- length and queues >= 32 don't support this check anyway. */
+ * length and queues >= 32 don't support this check anyway.
+ */
}
-
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
@@ -621,7 +628,6 @@ static inline void dma_unmap_tx(struct port *port, struct desc *desc)
#endif
}
-
static void hss_hdlc_set_carrier(void *pdev, int carrier)
{
struct net_device *netdev = pdev;
@@ -647,7 +653,7 @@ static void hss_hdlc_rx_irq(void *pdev)
#if DEBUG_RX
printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
#endif
- qmgr_disable_irq(queue_ids[port->id].rx);
+ qmgr_disable_irq(port->rxq);
napi_schedule(&port->napi);
}
@@ -655,8 +661,8 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
struct port *port = container_of(napi, struct port, napi);
struct net_device *dev = port->netdev;
- unsigned int rxq = queue_ids[port->id].rx;
- unsigned int rxfreeq = queue_ids[port->id].rxfree;
+ unsigned int rxq = port->rxq;
+ unsigned int rxfreeq = port->rxfreeq;
int received = 0;
#if DEBUG_RX
@@ -672,7 +678,8 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
u32 phys;
#endif
- if ((n = queue_get_desc(rxq, port, 0)) < 0) {
+ n = queue_get_desc(rxq, port, 0);
+ if (n < 0) {
#if DEBUG_RX
printk(KERN_DEBUG "%s: hss_hdlc_poll"
" napi_complete\n", dev->name);
@@ -680,10 +687,10 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
qmgr_enable_irq(rxq);
if (!qmgr_stat_empty(rxq) &&
- napi_reschedule(napi)) {
+ napi_schedule(napi)) {
#if DEBUG_RX
printk(KERN_DEBUG "%s: hss_hdlc_poll"
- " napi_reschedule succeeded\n",
+ " napi_schedule succeeded\n",
dev->name);
#endif
qmgr_disable_irq(rxq);
@@ -707,7 +714,8 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
switch (desc->status) {
case 0:
#ifdef __ARMEB__
- if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
+ skb = netdev_alloc_skb(dev, RX_SIZE);
+ if (skb) {
phys = dma_map_single(&dev->dev, skb->data,
RX_SIZE,
DMA_FROM_DEVICE);
@@ -786,7 +794,6 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
return received; /* not all work done */
}
-
static void hss_hdlc_txdone_irq(void *pdev)
{
struct net_device *dev = pdev;
@@ -796,7 +803,7 @@ static void hss_hdlc_txdone_irq(void *pdev)
#if DEBUG_TX
printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n");
#endif
- while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
+ while ((n_desc = queue_get_desc(port->txdoneq,
port, 1)) >= 0) {
struct desc *desc;
int start;
@@ -814,8 +821,8 @@ static void hss_hdlc_txdone_irq(void *pdev)
free_buffer_irq(port->tx_buff_tab[n_desc]);
port->tx_buff_tab[n_desc] = NULL;
- start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
- queue_put_desc(port->plat->txreadyq,
+ start = qmgr_stat_below_low_watermark(port->txreadyq);
+ queue_put_desc(port->txreadyq,
tx_desc_phys(port, n_desc), desc);
if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
@@ -830,7 +837,7 @@ static void hss_hdlc_txdone_irq(void *pdev)
static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct port *port = dev_to_port(dev);
- unsigned int txreadyq = port->plat->txreadyq;
+ unsigned int txreadyq = port->txreadyq;
int len, offset, bytes, n;
void *mem;
u32 phys;
@@ -856,12 +863,13 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
#else
offset = (int)skb->data & 3; /* keep 32-bit alignment */
bytes = ALIGN(offset + len, 4);
- if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
+ mem = kmalloc(bytes, GFP_ATOMIC);
+ if (!mem) {
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
- memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
+ memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4);
dev_kfree_skb(skb);
#endif
@@ -889,7 +897,7 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
desc->buf_len = desc->pkt_len = len;
wmb();
- queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
+ queue_put_desc(port->txq, tx_desc_phys(port, n), desc);
if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
@@ -912,45 +920,44 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-
static int request_hdlc_queues(struct port *port)
{
int err;
- err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0,
+ err = qmgr_request_queue(port->rxfreeq, RX_DESCS, 0, 0,
"%s:RX-free", port->netdev->name);
if (err)
return err;
- err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0,
+ err = qmgr_request_queue(port->rxq, RX_DESCS, 0, 0,
"%s:RX", port->netdev->name);
if (err)
goto rel_rxfree;
- err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0,
+ err = qmgr_request_queue(port->txq, TX_DESCS, 0, 0,
"%s:TX", port->netdev->name);
if (err)
goto rel_rx;
- err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
+ err = qmgr_request_queue(port->txreadyq, TX_DESCS, 0, 0,
"%s:TX-ready", port->netdev->name);
if (err)
goto rel_tx;
- err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0,
+ err = qmgr_request_queue(port->txdoneq, TX_DESCS, 0, 0,
"%s:TX-done", port->netdev->name);
if (err)
goto rel_txready;
return 0;
rel_txready:
- qmgr_release_queue(port->plat->txreadyq);
+ qmgr_release_queue(port->txreadyq);
rel_tx:
- qmgr_release_queue(queue_ids[port->id].tx);
+ qmgr_release_queue(port->txq);
rel_rx:
- qmgr_release_queue(queue_ids[port->id].rx);
+ qmgr_release_queue(port->rxq);
rel_rxfree:
- qmgr_release_queue(queue_ids[port->id].rxfree);
+ qmgr_release_queue(port->rxfreeq);
printk(KERN_DEBUG "%s: unable to request hardware queues\n",
port->netdev->name);
return err;
@@ -958,11 +965,11 @@ rel_rxfree:
static void release_hdlc_queues(struct port *port)
{
- qmgr_release_queue(queue_ids[port->id].rxfree);
- qmgr_release_queue(queue_ids[port->id].rx);
- qmgr_release_queue(queue_ids[port->id].txdone);
- qmgr_release_queue(queue_ids[port->id].tx);
- qmgr_release_queue(port->plat->txreadyq);
+ qmgr_release_queue(port->rxfreeq);
+ qmgr_release_queue(port->rxq);
+ qmgr_release_queue(port->txdoneq);
+ qmgr_release_queue(port->txq);
+ qmgr_release_queue(port->txreadyq);
}
static int init_hdlc_queues(struct port *port)
@@ -976,10 +983,10 @@ static int init_hdlc_queues(struct port *port)
return -ENOMEM;
}
- if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
- &port->desc_tab_phys)))
+ port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL,
+ &port->desc_tab_phys);
+ if (!port->desc_tab)
return -ENOMEM;
- memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
@@ -989,11 +996,13 @@ static int init_hdlc_queues(struct port *port)
buffer_t *buff;
void *data;
#ifdef __ARMEB__
- if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
+ buff = netdev_alloc_skb(port->netdev, RX_SIZE);
+ if (!buff)
return -ENOMEM;
data = buff->data;
#else
- if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
+ buff = kmalloc(RX_SIZE, GFP_KERNEL);
+ if (!buff)
return -ENOMEM;
data = buff;
#endif
@@ -1018,6 +1027,7 @@ static void destroy_hdlc_queues(struct port *port)
for (i = 0; i < RX_DESCS; i++) {
struct desc *desc = rx_desc_ptr(port, i);
buffer_t *buff = port->rx_buff_tab[i];
+
if (buff) {
dma_unmap_single(&port->netdev->dev,
desc->data, RX_SIZE,
@@ -1028,6 +1038,7 @@ static void destroy_hdlc_queues(struct port *port)
for (i = 0; i < TX_DESCS; i++) {
struct desc *desc = tx_desc_ptr(port, i);
buffer_t *buff = port->tx_buff_tab[i];
+
if (buff) {
dma_unmap_tx(port, desc);
free_buffer(buff);
@@ -1043,49 +1054,80 @@ static void destroy_hdlc_queues(struct port *port)
}
}
+static irqreturn_t hss_hdlc_dcd_irq(int irq, void *data)
+{
+ struct net_device *dev = data;
+ struct port *port = dev_to_port(dev);
+ int val;
+
+ val = gpiod_get_value(port->dcd);
+ hss_hdlc_set_carrier(dev, val);
+
+ return IRQ_HANDLED;
+}
+
static int hss_hdlc_open(struct net_device *dev)
{
struct port *port = dev_to_port(dev);
unsigned long flags;
int i, err = 0;
+ int val;
- if ((err = hdlc_open(dev)))
+ err = hdlc_open(dev);
+ if (err)
return err;
- if ((err = hss_load_firmware(port)))
+ err = hss_load_firmware(port);
+ if (err)
goto err_hdlc_close;
- if ((err = request_hdlc_queues(port)))
+ err = request_hdlc_queues(port);
+ if (err)
goto err_hdlc_close;
- if ((err = init_hdlc_queues(port)))
+ err = init_hdlc_queues(port);
+ if (err)
goto err_destroy_queues;
spin_lock_irqsave(&npe_lock, flags);
- if (port->plat->open)
- if ((err = port->plat->open(port->id, dev,
- hss_hdlc_set_carrier)))
- goto err_unlock;
+
+	/* Set the carrier. The GPIO is flagged active low, so this will
+	 * return 1 if DCD is asserted.
+	 */
+ val = gpiod_get_value(port->dcd);
+ hss_hdlc_set_carrier(dev, val);
+
+ /* Set up an IRQ for DCD */
+ err = request_irq(gpiod_to_irq(port->dcd), hss_hdlc_dcd_irq, 0, "IXP4xx HSS", dev);
+ if (err) {
+ dev_err(&dev->dev, "ixp4xx_hss: failed to request DCD IRQ (%i)\n", err);
+ goto err_unlock;
+ }
+
+ /* GPIOs are flagged active low so this asserts DTR and RTS */
+ gpiod_set_value(port->dtr, 1);
+ gpiod_set_value(port->rts, 1);
+
spin_unlock_irqrestore(&npe_lock, flags);
/* Populate queues with buffers, no failure after this point */
for (i = 0; i < TX_DESCS; i++)
- queue_put_desc(port->plat->txreadyq,
+ queue_put_desc(port->txreadyq,
tx_desc_phys(port, i), tx_desc_ptr(port, i));
for (i = 0; i < RX_DESCS; i++)
- queue_put_desc(queue_ids[port->id].rxfree,
+ queue_put_desc(port->rxfreeq,
rx_desc_phys(port, i), rx_desc_ptr(port, i));
napi_enable(&port->napi);
netif_start_queue(dev);
- qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
+ qmgr_set_irq(port->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
hss_hdlc_rx_irq, dev);
- qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
+ qmgr_set_irq(port->txdoneq, QUEUE_IRQ_SRC_NOT_EMPTY,
hss_hdlc_txdone_irq, dev);
- qmgr_enable_irq(queue_ids[port->id].txdone);
+ qmgr_enable_irq(port->txdoneq);
ports_open++;
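
As the comments in hss_hdlc_open() above note, the modem-control GPIOs are described as active low and gpiolib performs the polarity inversion, so the driver only ever deals in logical asserted/de-asserted values. A hypothetical helper (not part of the patch) wrapping the DTR/RTS handling used in open and close could look like:

#include <linux/gpio/consumer.h>

/* Hypothetical helper: assert (1) or de-assert (0) DTR and RTS.
 * gpiolib inverts the physical level for lines flagged GPIO_ACTIVE_LOW,
 * so the logical value passed here is independent of board wiring.
 */
static void hss_set_modem_lines(struct port *port, int asserted)
{
	gpiod_set_value(port->dtr, asserted);
	gpiod_set_value(port->rts, asserted);
}
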
@@ -1116,15 +1158,15 @@ static int hss_hdlc_close(struct net_device *dev)
spin_lock_irqsave(&npe_lock, flags);
ports_open--;
- qmgr_disable_irq(queue_ids[port->id].rx);
+ qmgr_disable_irq(port->rxq);
netif_stop_queue(dev);
napi_disable(&port->napi);
hss_stop_hdlc(port);
- while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
+ while (queue_get_desc(port->rxfreeq, port, 0) >= 0)
buffs--;
- while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)
+ while (queue_get_desc(port->rxq, port, 0) >= 0)
buffs--;
if (buffs)
@@ -1132,12 +1174,12 @@ static int hss_hdlc_close(struct net_device *dev)
buffs);
buffs = TX_DESCS;
- while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
+ while (queue_get_desc(port->txq, port, 1) >= 0)
buffs--; /* cancel TX */
i = 0;
do {
- while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
+ while (queue_get_desc(port->txreadyq, port, 1) >= 0)
buffs--;
if (!buffs)
break;
@@ -1150,10 +1192,12 @@ static int hss_hdlc_close(struct net_device *dev)
if (!buffs)
printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif
- qmgr_disable_irq(queue_ids[port->id].txdone);
+ qmgr_disable_irq(port->txdoneq);
- if (port->plat->close)
- port->plat->close(port->id, dev);
+ free_irq(gpiod_to_irq(port->dcd), dev);
+ /* GPIOs are flagged active low so this de-asserts DTR and RTS */
+ gpiod_set_value(port->dtr, 0);
+ gpiod_set_value(port->rts, 0);
spin_unlock_irqrestore(&npe_lock, flags);
destroy_hdlc_queues(port);
@@ -1162,7 +1206,6 @@ static int hss_hdlc_close(struct net_device *dev)
return 0;
}
-
static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
@@ -1171,7 +1214,7 @@ static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
if (encoding != ENCODING_NRZ)
return -EINVAL;
- switch(parity) {
+ switch (parity) {
case PARITY_CRC16_PR1_CCITT:
port->hdlc_cfg = 0;
return 0;
@@ -1185,14 +1228,14 @@ static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
}
}
-static u32 check_clock(u32 rate, u32 a, u32 b, u32 c,
+static u32 check_clock(u32 timer_freq, u32 rate, u32 a, u32 b, u32 c,
u32 *best, u32 *best_diff, u32 *reg)
{
/* a is 10-bit, b is 10-bit, c is 12-bit */
u64 new_rate;
u32 new_diff;
- new_rate = ixp4xx_timer_freq * (u64)(c + 1);
+ new_rate = timer_freq * (u64)(c + 1);
do_div(new_rate, a * (c + 1) + b + 1);
new_diff = abs((u32)new_rate - rate);
@@ -1204,61 +1247,77 @@ static u32 check_clock(u32 rate, u32 a, u32 b, u32 c,
return new_diff;
}
-static void find_best_clock(u32 rate, u32 *best, u32 *reg)
+static void find_best_clock(u32 timer_freq, u32 rate, u32 *best, u32 *reg)
{
u32 a, b, diff = 0xFFFFFFFF;
- a = ixp4xx_timer_freq / rate;
+ a = timer_freq / rate;
if (a > 0x3FF) { /* 10-bit value - we can go as slow as ca. 65 kb/s */
- check_clock(rate, 0x3FF, 1, 1, best, &diff, reg);
+ check_clock(timer_freq, rate, 0x3FF, 1, 1, best, &diff, reg);
return;
}
if (a == 0) { /* > 66.666 MHz */
a = 1; /* minimum divider is 1 (a = 0, b = 1, c = 1) */
- rate = ixp4xx_timer_freq;
+ rate = timer_freq;
}
- if (rate * a == ixp4xx_timer_freq) { /* don't divide by 0 later */
- check_clock(rate, a - 1, 1, 1, best, &diff, reg);
+ if (rate * a == timer_freq) { /* don't divide by 0 later */
+ check_clock(timer_freq, rate, a - 1, 1, 1, best, &diff, reg);
return;
}
for (b = 0; b < 0x400; b++) {
u64 c = (b + 1) * (u64)rate;
- do_div(c, ixp4xx_timer_freq - rate * a);
+
+ do_div(c, timer_freq - rate * a);
c--;
if (c >= 0xFFF) { /* 12-bit - no need to check more 'b's */
if (b == 0 && /* also try a bit higher rate */
- !check_clock(rate, a - 1, 1, 1, best, &diff, reg))
+ !check_clock(timer_freq, rate, a - 1, 1, 1, best,
+ &diff, reg))
return;
- check_clock(rate, a, b, 0xFFF, best, &diff, reg);
+ check_clock(timer_freq, rate, a, b, 0xFFF, best,
+ &diff, reg);
return;
}
- if (!check_clock(rate, a, b, c, best, &diff, reg))
+ if (!check_clock(timer_freq, rate, a, b, c, best, &diff, reg))
return;
- if (!check_clock(rate, a, b, c + 1, best, &diff, reg))
+ if (!check_clock(timer_freq, rate, a, b, c + 1, best, &diff,
+ reg))
return;
}
}
-static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int hss_hdlc_set_clock(struct port *port, unsigned int clock_type)
+{
+ switch (clock_type) {
+ case CLOCK_DEFAULT:
+ case CLOCK_EXT:
+ gpiod_set_value(port->clk_internal, 0);
+ return CLOCK_EXT;
+ case CLOCK_INT:
+ gpiod_set_value(port->clk_internal, 1);
+ return CLOCK_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hss_hdlc_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings new_line;
- sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
struct port *port = dev_to_port(dev);
unsigned long flags;
int clk;
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
-
- switch(ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_IFACE:
- ifr->ifr_settings.type = IF_IFACE_V35;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_V35;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
memset(&new_line, 0, sizeof(new_line));
@@ -1271,14 +1330,13 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case IF_IFACE_SYNC_SERIAL:
case IF_IFACE_V35:
- if(!capable(CAP_NET_ADMIN))
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&new_line, line, size))
return -EFAULT;
clk = new_line.clock_type;
- if (port->plat->set_clock)
- clk = port->plat->set_clock(port->id, clk);
+ hss_hdlc_set_clock(port, clk);
if (clk != CLOCK_EXT && clk != CLOCK_INT)
return -EINVAL; /* No such clock setting */
@@ -1287,10 +1345,11 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EINVAL;
port->clock_type = clk; /* Update settings */
- if (clk == CLOCK_INT)
- find_best_clock(new_line.clock_rate, &port->clock_rate,
- &port->clock_reg);
- else {
+ if (clk == CLOCK_INT) {
+ find_best_clock(IXP4XX_TIMER_FREQ,
+ new_line.clock_rate,
+ &port->clock_rate, &port->clock_reg);
+ } else {
port->clock_rate = 0;
port->clock_reg = CLK42X_SPEED_2048KHZ;
}
@@ -1310,7 +1369,7 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -1321,99 +1380,165 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static const struct net_device_ops hss_hdlc_ops = {
.ndo_open = hss_hdlc_open,
.ndo_stop = hss_hdlc_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = hss_hdlc_ioctl,
+ .ndo_siocwandev = hss_hdlc_ioctl,
};
-static int hss_init_one(struct platform_device *pdev)
+static int ixp4xx_hss_probe(struct platform_device *pdev)
{
+ struct of_phandle_args queue_spec;
+ struct of_phandle_args npe_spec;
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev;
+ struct device_node *np;
+ struct regmap *rmap;
struct port *port;
- struct net_device *dev;
hdlc_device *hdlc;
int err;
+ u32 val;
+
+ /*
+ * Go into the syscon and check if we have the HSS and HDLC
+ * features available, else this will not work.
+ */
+ rmap = syscon_regmap_lookup_by_compatible("syscon");
+ if (IS_ERR(rmap))
+ return dev_err_probe(dev, PTR_ERR(rmap),
+ "failed to look up syscon\n");
+
+ val = cpu_ixp4xx_features(rmap);
+
+ if ((val & (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
+ (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) {
+ dev_err(dev, "HDLC and HSS feature unavailable in platform\n");
+ return -ENODEV;
+ }
- if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)
+ np = dev->of_node;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
return -ENOMEM;
- if ((port->npe = npe_request(0)) == NULL) {
- err = -ENODEV;
- goto err_free;
+ err = of_parse_phandle_with_fixed_args(np, "intel,npe-handle", 1, 0,
+ &npe_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no NPE engine specified\n");
+ /* NPE ID 0x00, 0x10, 0x20... */
+ port->npe = npe_request(npe_spec.args[0] << 4);
+ if (!port->npe) {
+ dev_err(dev, "unable to obtain NPE instance\n");
+ return -ENODEV;
}
- if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) {
+ /* Get the TX ready queue as resource from queue manager */
+	err = of_parse_phandle_with_fixed_args(np, "intel,queue-chl-txready", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no txready queue phandle\n");
+ port->txreadyq = queue_spec.args[0];
+ /* Get the RX trig queue as resource from queue manager */
+	err = of_parse_phandle_with_fixed_args(np, "intel,queue-chl-rxtrig", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no rxtrig queue phandle\n");
+ port->rxtrigq = queue_spec.args[0];
+ /* Get the RX queue as resource from queue manager */
+	err = of_parse_phandle_with_fixed_args(np, "intel,queue-pkt-rx", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no RX queue phandle\n");
+ port->rxq = queue_spec.args[0];
+ /* Get the TX queue as resource from queue manager */
+	err = of_parse_phandle_with_fixed_args(np, "intel,queue-pkt-tx", 1, 0,
+ &queue_spec);
+ if (err)
+		return dev_err_probe(dev, err, "no TX queue phandle\n");
+ port->txq = queue_spec.args[0];
+ /* Get the RX free queue as resource from queue manager */
+	err = of_parse_phandle_with_fixed_args(np, "intel,queue-pkt-rxfree", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no RX free queue phandle\n");
+ port->rxfreeq = queue_spec.args[0];
+ /* Get the TX done queue as resource from queue manager */
+	err = of_parse_phandle_with_fixed_args(np, "intel,queue-pkt-txdone", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no TX done queue phandle\n");
+ port->txdoneq = queue_spec.args[0];
+
+ /* Obtain all the line control GPIOs */
+ port->cts = devm_gpiod_get(dev, "cts", GPIOD_OUT_LOW);
+ if (IS_ERR(port->cts))
+ return dev_err_probe(dev, PTR_ERR(port->cts), "unable to get CTS GPIO\n");
+ port->rts = devm_gpiod_get(dev, "rts", GPIOD_OUT_LOW);
+ if (IS_ERR(port->rts))
+ return dev_err_probe(dev, PTR_ERR(port->rts), "unable to get RTS GPIO\n");
+ port->dcd = devm_gpiod_get(dev, "dcd", GPIOD_IN);
+ if (IS_ERR(port->dcd))
+ return dev_err_probe(dev, PTR_ERR(port->dcd), "unable to get DCD GPIO\n");
+ port->dtr = devm_gpiod_get(dev, "dtr", GPIOD_OUT_LOW);
+ if (IS_ERR(port->dtr))
+ return dev_err_probe(dev, PTR_ERR(port->dtr), "unable to get DTR GPIO\n");
+ port->clk_internal = devm_gpiod_get(dev, "clk-internal", GPIOD_OUT_LOW);
+ if (IS_ERR(port->clk_internal))
+ return dev_err_probe(dev, PTR_ERR(port->clk_internal),
+ "unable to get CLK internal GPIO\n");
+
+ ndev = alloc_hdlcdev(port);
+	port->netdev = ndev;
+	if (!ndev) {
err = -ENOMEM;
goto err_plat;
}
- SET_NETDEV_DEV(dev, &pdev->dev);
- hdlc = dev_to_hdlc(dev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ hdlc = dev_to_hdlc(ndev);
hdlc->attach = hss_hdlc_attach;
hdlc->xmit = hss_hdlc_xmit;
- dev->netdev_ops = &hss_hdlc_ops;
- dev->tx_queue_len = 100;
+ ndev->netdev_ops = &hss_hdlc_ops;
+ ndev->tx_queue_len = 100;
port->clock_type = CLOCK_EXT;
port->clock_rate = 0;
port->clock_reg = CLK42X_SPEED_2048KHZ;
port->id = pdev->id;
port->dev = &pdev->dev;
- port->plat = pdev->dev.platform_data;
- netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);
+ netif_napi_add_weight(ndev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);
- if ((err = register_hdlc_device(dev)))
+ err = register_hdlc_device(ndev);
+ if (err)
goto err_free_netdev;
platform_set_drvdata(pdev, port);
- netdev_info(dev, "initialized\n");
+ netdev_info(ndev, "initialized\n");
return 0;
err_free_netdev:
- free_netdev(dev);
+ free_netdev(ndev);
err_plat:
npe_release(port->npe);
-err_free:
- kfree(port);
return err;
}
-static int hss_remove_one(struct platform_device *pdev)
+static void ixp4xx_hss_remove(struct platform_device *pdev)
{
struct port *port = platform_get_drvdata(pdev);
unregister_hdlc_device(port->netdev);
free_netdev(port->netdev);
npe_release(port->npe);
- kfree(port);
- return 0;
}
static struct platform_driver ixp4xx_hss_driver = {
.driver.name = DRV_NAME,
- .probe = hss_init_one,
- .remove = hss_remove_one,
+ .probe = ixp4xx_hss_probe,
+ .remove = ixp4xx_hss_remove,
};
-
-static int __init hss_init_module(void)
-{
- if ((ixp4xx_read_feature_bits() &
- (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
- (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
- return -ENODEV;
-
- spin_lock_init(&npe_lock);
-
- return platform_driver_register(&ixp4xx_hss_driver);
-}
-
-static void __exit hss_cleanup_module(void)
-{
- platform_driver_unregister(&ixp4xx_hss_driver);
-}
+module_platform_driver(ixp4xx_hss_driver);
MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_hss");
-module_init(hss_init_module);
-module_exit(hss_cleanup_module);