Diffstat (limited to 'drivers/net/ethernet/xilinx')
-rw-r--r--  drivers/net/ethernet/xilinx/Kconfig                  1
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac.h               8
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c        181
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet.h        19
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c  444
5 files changed, 446 insertions, 207 deletions
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 6304ebd8b5c6..0810af8193cb 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -32,7 +32,6 @@ config XILINX_AXI_EMAC
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
- depends on PPC || MICROBLAZE || X86 || COMPILE_TEST
select PHYLIB
---help---
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 53fb8141f1a6..4a73127e10a6 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -369,18 +369,20 @@ struct temac_local {
/* Buffer descriptors */
struct cdmac_bd *tx_bd_v;
dma_addr_t tx_bd_p;
+ u32 tx_bd_num;
struct cdmac_bd *rx_bd_v;
dma_addr_t rx_bd_p;
+ u32 rx_bd_num;
int tx_bd_ci;
- int tx_bd_next;
int tx_bd_tail;
int rx_bd_ci;
int rx_bd_tail;
/* DMA channel control setup */
- u32 tx_chnl_ctrl;
- u32 rx_chnl_ctrl;
+ u8 coalesce_count_tx;
+ u8 coalesce_delay_tx;
u8 coalesce_count_rx;
+ u8 coalesce_delay_rx;
struct delayed_work restart_work;
};
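
The tx_chnl_ctrl/rx_chnl_ctrl words are replaced above by separate delay and count fields. A minimal sketch (not part of the patch) of how temac_dma_bd_init() below reassembles the channel-control word from them; the old hard-coded TX value 0x10220000 decomposes the same way into delay 0x10 and count 0x22:

	#include <stdint.h>

	/* Illustrative only: delay in bits 31:24, frame count in bits 23:16,
	 * matching the TX_CHNL_CTRL/RX_CHNL_CTRL writes in temac_dma_bd_init().
	 */
	static uint32_t chnl_ctrl_coalesce(uint8_t delay, uint8_t count)
	{
		return ((uint32_t)delay << 24) | ((uint32_t)count << 16);
	}
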
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9461acec6f70..3e313e71ae36 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -58,8 +58,11 @@
#include "ll_temac.h"
-#define TX_BD_NUM 64
-#define RX_BD_NUM 128
+/* Descriptors defines for Tx and Rx DMA */
+#define TX_BD_NUM_DEFAULT 64
+#define RX_BD_NUM_DEFAULT 1024
+#define TX_BD_NUM_MAX 4096
+#define RX_BD_NUM_MAX 4096
/* ---------------------------------------------------------------------
* Low level register access functions
@@ -301,7 +304,7 @@ static void temac_dma_bd_release(struct net_device *ndev)
/* Reset Local Link (DMA) */
lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
if (!lp->rx_skb[i])
break;
else {
@@ -312,12 +315,12 @@ static void temac_dma_bd_release(struct net_device *ndev)
}
if (lp->rx_bd_v)
dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- lp->rx_bd_v, lp->rx_bd_p);
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+ lp->rx_bd_v, lp->rx_bd_p);
if (lp->tx_bd_v)
dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- lp->tx_bd_v, lp->tx_bd_p);
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+ lp->tx_bd_v, lp->tx_bd_p);
}
/**
@@ -330,33 +333,33 @@ static int temac_dma_bd_init(struct net_device *ndev)
dma_addr_t skb_dma_addr;
int i;
- lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb),
- GFP_KERNEL);
+ lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
+ sizeof(*lp->rx_skb), GFP_KERNEL);
if (!lp->rx_skb)
goto out;
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
&lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
- for (i = 0; i < TX_BD_NUM; i++) {
+ for (i = 0; i < lp->tx_bd_num; i++) {
lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
- + sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM));
+ + sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
}
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
- + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM));
+ + sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));
skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE);
@@ -376,21 +379,22 @@ static int temac_dma_bd_init(struct net_device *ndev)
}
/* Configure DMA channel (irq setup) */
- lp->dma_out(lp, TX_CHNL_CTRL, lp->tx_chnl_ctrl |
+ lp->dma_out(lp, TX_CHNL_CTRL,
+ lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
- lp->dma_out(lp, RX_CHNL_CTRL, lp->rx_chnl_ctrl |
+ lp->dma_out(lp, RX_CHNL_CTRL,
+ lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
CHNL_CTRL_IRQ_IOE |
CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
/* Init descriptor indexes */
lp->tx_bd_ci = 0;
- lp->tx_bd_next = 0;
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
- lp->rx_bd_tail = RX_BD_NUM - 1;
+ lp->rx_bd_tail = lp->rx_bd_num - 1;
/* Enable RX DMA transfers */
wmb();
@@ -785,7 +789,7 @@ static void temac_start_xmit_done(struct net_device *ndev)
ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
lp->tx_bd_ci++;
- if (lp->tx_bd_ci >= TX_BD_NUM)
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
lp->tx_bd_ci = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
@@ -811,7 +815,7 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
return NETDEV_TX_BUSY;
tail++;
- if (tail >= TX_BD_NUM)
+ if (tail >= lp->tx_bd_num)
tail = 0;
cur_p = &lp->tx_bd_v[tail];
@@ -826,14 +830,13 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct cdmac_bd *cur_p;
- dma_addr_t start_p, tail_p, skb_dma_addr;
+ dma_addr_t tail_p, skb_dma_addr;
int ii;
unsigned long num_frag;
skb_frag_t *frag;
num_frag = skb_shinfo(skb)->nr_frags;
frag = &skb_shinfo(skb)->frags[0];
- start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (temac_check_tx_bd_space(lp, num_frag + 1)) {
@@ -876,7 +879,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ptr_to_txbd((void *)skb, cur_p);
for (ii = 0; ii < num_frag; ii++) {
- if (++lp->tx_bd_tail >= TX_BD_NUM)
+ if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -886,7 +889,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
if (--lp->tx_bd_tail < 0)
- lp->tx_bd_tail = TX_BD_NUM - 1;
+ lp->tx_bd_tail = lp->tx_bd_num - 1;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
while (--ii >= 0) {
--frag;
@@ -895,7 +898,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_frag_size(frag),
DMA_TO_DEVICE);
if (--lp->tx_bd_tail < 0)
- lp->tx_bd_tail = TX_BD_NUM - 1;
+ lp->tx_bd_tail = lp->tx_bd_num - 1;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
}
dma_unmap_single(ndev->dev.parent,
@@ -914,7 +917,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
lp->tx_bd_tail++;
- if (lp->tx_bd_tail >= TX_BD_NUM)
+ if (lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
skb_tx_timestamp(skb);
@@ -934,7 +937,7 @@ static int ll_temac_recv_buffers_available(struct temac_local *lp)
return 0;
available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
if (available <= 0)
- available += RX_BD_NUM;
+ available += lp->rx_bd_num;
return available;
}
@@ -1003,7 +1006,7 @@ static void ll_temac_recv(struct net_device *ndev)
ndev->stats.rx_bytes += length;
rx_bd = lp->rx_bd_ci;
- if (++lp->rx_bd_ci >= RX_BD_NUM)
+ if (++lp->rx_bd_ci >= lp->rx_bd_num)
lp->rx_bd_ci = 0;
} while (rx_bd != lp->rx_bd_tail);
@@ -1034,7 +1037,7 @@ static void ll_temac_recv(struct net_device *ndev)
dma_addr_t skb_dma_addr;
rx_bd = lp->rx_bd_tail + 1;
- if (rx_bd >= RX_BD_NUM)
+ if (rx_bd >= lp->rx_bd_num)
rx_bd = 0;
bd = &lp->rx_bd_v[rx_bd];
@@ -1250,13 +1253,96 @@ static const struct attribute_group temac_attr_group = {
.attrs = temac_device_attrs,
};
-/* ethtool support */
+/* ---------------------------------------------------------------------
+ * ethtool support
+ */
+
+static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ ering->rx_max_pending = RX_BD_NUM_MAX;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+ ering->tx_max_pending = TX_BD_NUM_MAX;
+ ering->rx_pending = lp->rx_bd_num;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+ ering->tx_pending = lp->tx_bd_num;
+}
+
+static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ if (ering->rx_pending > RX_BD_NUM_MAX ||
+ ering->rx_mini_pending ||
+ ering->rx_jumbo_pending ||
+ ering->tx_pending > TX_BD_NUM_MAX)
+ return -EINVAL;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ lp->rx_bd_num = ering->rx_pending;
+ lp->tx_bd_num = ering->tx_pending;
+ return 0;
+}
+
+static int ll_temac_ethtools_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
+ ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
+ ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
+ ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
+ return 0;
+}
+
+static int ll_temac_ethtools_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ netdev_err(ndev,
+ "Please stop netif before applying configuration\n");
+ return -EFAULT;
+ }
+
+ if (ec->rx_max_coalesced_frames)
+ lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
+ if (ec->tx_max_coalesced_frames)
+ lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
+ /* With typical LocalLink clock speed of 200 MHz and
+ * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
+ */
+ if (ec->rx_coalesce_usecs)
+ lp->coalesce_delay_rx =
+ min(255U, (ec->rx_coalesce_usecs * 100) / 512);
+ if (ec->tx_coalesce_usecs)
+ lp->coalesce_delay_tx =
+ min(255U, (ec->tx_coalesce_usecs * 100) / 512);
+
+ return 0;
+}
+
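
A worked version of the microsecond scaling used by the two coalesce handlers above (a sketch, not driver code): with the 5.12 us per delay count that the comment assumes (200 MHz LocalLink clock, C_PRESCALAR = 1023), a request of 100 us becomes (100 * 100) / 512 = 19 counts, i.e. roughly 97.3 us of actual delay.

	#include <stdint.h>

	/* Sketch of the usecs <-> delay-count scaling, clamped to the 8-bit
	 * register field like the min(255U, ...) above.
	 */
	static uint8_t usecs_to_delay(uint32_t usecs)
	{
		uint32_t count = (usecs * 100) / 512;

		return count > 255 ? 255 : (uint8_t)count;
	}

	static uint32_t delay_to_usecs(uint8_t delay)
	{
		return ((uint32_t)delay * 512) / 100;
	}
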
static const struct ethtool_ops temac_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_ringparam = ll_temac_ethtools_get_ringparam,
+ .set_ringparam = ll_temac_ethtools_set_ringparam,
+ .get_coalesce = ll_temac_ethtools_get_coalesce,
+ .set_coalesce = ll_temac_ethtools_set_coalesce,
};
static int temac_probe(struct platform_device *pdev)
@@ -1300,6 +1386,8 @@ static int temac_probe(struct platform_device *pdev)
lp->ndev = ndev;
lp->dev = &pdev->dev;
lp->options = XTE_OPTION_DEFAULTS;
+ lp->rx_bd_num = RX_BD_NUM_DEFAULT;
+ lp->tx_bd_num = TX_BD_NUM_DEFAULT;
spin_lock_init(&lp->rx_lock);
INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
@@ -1364,6 +1452,14 @@ static int temac_probe(struct platform_device *pdev)
/* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_IP_CSUM;
+ /* Defaults for IRQ delay/coalescing setup. These are
+ * configuration values, so they do not belong in device-tree.
+ */
+ lp->coalesce_delay_tx = 0x10;
+ lp->coalesce_count_tx = 0x22;
+ lp->coalesce_delay_rx = 0xff;
+ lp->coalesce_count_rx = 0x07;
+
/* Setup LocalLink DMA */
if (temac_np) {
/* Find the DMA node, map the DMA registers, and
@@ -1402,14 +1498,6 @@ static int temac_probe(struct platform_device *pdev)
lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
lp->tx_irq = irq_of_parse_and_map(dma_np, 1);
- /* Use defaults for IRQ delay/coalescing setup. These
- * are configuration values, so does not belong in
- * device-tree.
- */
- lp->tx_chnl_ctrl = 0x10220000;
- lp->rx_chnl_ctrl = 0xff070000;
- lp->coalesce_count_rx = 0x07;
-
/* Finished with the DMA node; drop the reference */
of_node_put(dma_np);
} else if (pdata) {
@@ -1435,18 +1523,13 @@ static int temac_probe(struct platform_device *pdev)
lp->tx_irq = platform_get_irq(pdev, 1);
/* IRQ delay/coalescing setup */
- if (pdata->tx_irq_timeout || pdata->tx_irq_count)
- lp->tx_chnl_ctrl = (pdata->tx_irq_timeout << 24) |
- (pdata->tx_irq_count << 16);
- else
- lp->tx_chnl_ctrl = 0x10220000;
+ if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
+ lp->coalesce_delay_tx = pdata->tx_irq_timeout;
+ lp->coalesce_count_tx = pdata->tx_irq_count;
+ }
if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
- lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
- (pdata->rx_irq_count << 16);
+ lp->coalesce_delay_rx = pdata->rx_irq_timeout;
lp->coalesce_count_rx = pdata->rx_irq_count;
- } else {
- lp->rx_chnl_ctrl = 0xff070000;
- lp->coalesce_count_rx = 0x07;
}
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 2dacfc85b3ba..fbaf3c987d9c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -161,17 +161,11 @@
#define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */
#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */
#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */
+#define XAE_ID_OFFSET 0x000004F8 /* Identification register */
#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */
#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */
#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
-#define XAE_MDIO_MIS_OFFSET 0x00000600 /* MII Management Interrupt Status */
-/* MII Mgmt Interrupt Pending register offset */
-#define XAE_MDIO_MIP_OFFSET 0x00000620
-/* MII Management Interrupt Enable register offset */
-#define XAE_MDIO_MIE_OFFSET 0x00000640
-/* MII Management Interrupt Clear register offset. */
-#define XAE_MDIO_MIC_OFFSET 0x00000660
#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
@@ -335,6 +329,7 @@
#define XAE_FEATURE_PARTIAL_TX_CSUM (1 << 1)
#define XAE_FEATURE_FULL_RX_CSUM (1 << 2)
#define XAE_FEATURE_FULL_TX_CSUM (1 << 3)
+#define XAE_FEATURE_DMA_64BIT (1 << 4)
#define XAE_NO_CSUM_OFFLOAD 0
@@ -347,9 +342,9 @@
/**
* struct axidma_bd - Axi Dma buffer descriptor layout
* @next: MM2S/S2MM Next Descriptor Pointer
- * @reserved1: Reserved and not used
+ * @next_msb: MM2S/S2MM Next Descriptor Pointer (high 32 bits)
* @phys: MM2S/S2MM Buffer Address
- * @reserved2: Reserved and not used
+ * @phys_msb: MM2S/S2MM Buffer Address (high 32 bits)
* @reserved3: Reserved and not used
* @reserved4: Reserved and not used
* @cntrl: MM2S/S2MM Control value
@@ -362,9 +357,9 @@
*/
struct axidma_bd {
u32 next; /* Physical address of next buffer descriptor */
- u32 reserved1;
+ u32 next_msb; /* high 32 bits for IP >= v7.1, reserved on older IP */
u32 phys;
- u32 reserved2;
+ u32 phys_msb; /* for IP >= v7.1, reserved for older IP */
u32 reserved3;
u32 reserved4;
u32 cntrl;
@@ -435,7 +430,7 @@ struct axienet_local {
void __iomem *regs;
void __iomem *dma_regs;
- struct tasklet_struct dma_err_tasklet;
+ struct work_struct dma_err_task;
int tx_irq;
int rx_irq;
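
The dma_err_tasklet to dma_err_task change above moves DMA error recovery onto a workqueue; the main file below wires it up with INIT_WORK(), schedule_work() and cancel_work_sync(). A minimal, self-contained sketch of that pattern (the struct name here is illustrative, not the driver's):

	#include <linux/workqueue.h>

	struct example_priv {			/* illustrative, not the driver's struct */
		struct work_struct dma_err_task;
	};

	static void example_dma_err_handler(struct work_struct *work)
	{
		struct example_priv *lp =
			container_of(work, struct example_priv, dma_err_task);

		/* reset DMA and rebuild descriptor rings here */
		(void)lp;
	}

	/* open:	INIT_WORK(&lp->dma_err_task, example_dma_err_handler);
	 * ISR:		schedule_work(&lp->dma_err_task);
	 * stop:	cancel_work_sync(&lp->dma_err_task);
	 */
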
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 20746b801959..fa5dc2993520 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -147,6 +147,34 @@ static inline void axienet_dma_out32(struct axienet_local *lp,
iowrite32(value, lp->dma_regs + reg);
}
+static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+ dma_addr_t addr)
+{
+ axienet_dma_out32(lp, reg, lower_32_bits(addr));
+
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
+}
+
+static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
+ struct axidma_bd *desc)
+{
+ desc->phys = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ desc->phys_msb = upper_32_bits(addr);
+}
+
+static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
+ struct axidma_bd *desc)
+{
+ dma_addr_t ret = desc->phys;
+
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
+
+ return ret;
+}
+
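
The double 16-bit shift in desc_get_phys_addr() (rather than a single << 32) presumably keeps the expression well-defined when dma_addr_t is only 32 bits wide. A standalone sketch of the round-trip, assuming a 64-bit address type:

	#include <stdint.h>

	/* Sketch only: split and recombine an address the way desc_set_phys_addr()
	 * and desc_get_phys_addr() do for descriptors with _msb words.
	 */
	static void split_addr(uint64_t addr, uint32_t *lsb, uint32_t *msb)
	{
		*lsb = (uint32_t)addr;
		*msb = (uint32_t)(addr >> 32);
	}

	static uint64_t join_addr(uint32_t lsb, uint32_t msb)
	{
		return (uint64_t)lsb | (((uint64_t)msb << 16) << 16);
	}
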
/**
* axienet_dma_bd_release - Release buffer descriptor rings
* @ndev: Pointer to the net_device structure
@@ -160,24 +188,41 @@ static void axienet_dma_bd_release(struct net_device *ndev)
int i;
struct axienet_local *lp = netdev_priv(ndev);
+ /* If we end up here, tx_bd_v must have been DMA allocated. */
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+ lp->tx_bd_v,
+ lp->tx_bd_p);
+
+ if (!lp->rx_bd_v)
+ return;
+
for (i = 0; i < lp->rx_bd_num; i++) {
- dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
- lp->max_frm_size, DMA_FROM_DEVICE);
+ dma_addr_t phys;
+
+ /* A NULL skb means this descriptor has not been initialised
+ * at all.
+ */
+ if (!lp->rx_bd_v[i].skb)
+ break;
+
dev_kfree_skb(lp->rx_bd_v[i].skb);
- }
- if (lp->rx_bd_v) {
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
- lp->rx_bd_v,
- lp->rx_bd_p);
- }
- if (lp->tx_bd_v) {
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
- lp->tx_bd_v,
- lp->tx_bd_p);
+ /* For each descriptor, we programmed cntrl with the (non-zero)
+ * descriptor size, after it had been successfully allocated.
+ * So a non-zero value in there means we need to unmap it.
+ */
+ if (lp->rx_bd_v[i].cntrl) {
+ phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
+ dma_unmap_single(ndev->dev.parent, phys,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ }
}
+
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+ lp->rx_bd_v,
+ lp->rx_bd_p);
}
/**
@@ -207,7 +252,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
- goto out;
+ return -ENOMEM;
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
@@ -216,25 +261,37 @@ static int axienet_dma_bd_init(struct net_device *ndev)
goto out;
for (i = 0; i < lp->tx_bd_num; i++) {
- lp->tx_bd_v[i].next = lp->tx_bd_p +
- sizeof(*lp->tx_bd_v) *
- ((i + 1) % lp->tx_bd_num);
+ dma_addr_t addr = lp->tx_bd_p +
+ sizeof(*lp->tx_bd_v) *
+ ((i + 1) % lp->tx_bd_num);
+
+ lp->tx_bd_v[i].next = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
}
for (i = 0; i < lp->rx_bd_num; i++) {
- lp->rx_bd_v[i].next = lp->rx_bd_p +
- sizeof(*lp->rx_bd_v) *
- ((i + 1) % lp->rx_bd_num);
+ dma_addr_t addr;
+
+ addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
+ ((i + 1) % lp->rx_bd_num);
+ lp->rx_bd_v[i].next = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
if (!skb)
goto out;
lp->rx_bd_v[i].skb = skb;
- lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
- skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
+ addr = dma_map_single(ndev->dev.parent, skb->data,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, addr)) {
+ netdev_err(ndev, "DMA mapping error\n");
+ goto out;
+ }
+ desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
+
lp->rx_bd_v[i].cntrl = lp->max_frm_size;
}
@@ -267,18 +324,18 @@ static int axienet_dma_bd_init(struct net_device *ndev)
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
- axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
* tail pointer register that the Tx channel will start transmitting.
*/
- axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
@@ -437,9 +494,10 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
}
-static void __axienet_device_reset(struct axienet_local *lp)
+static int __axienet_device_reset(struct axienet_local *lp)
{
u32 timeout;
+
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
* commands/transfers will be flushed or completed during this
@@ -455,9 +513,11 @@ static void __axienet_device_reset(struct axienet_local *lp)
if (--timeout == 0) {
netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
__func__);
- break;
+ return -ETIMEDOUT;
}
}
+
+ return 0;
}
/**
@@ -470,13 +530,17 @@ static void __axienet_device_reset(struct axienet_local *lp)
* are connected to Axi Ethernet reset lines, this in turn resets the Axi
* Ethernet core. No separate hardware reset is done for the Axi Ethernet
* core.
+ * Returns 0 on success or a negative error number otherwise.
*/
-static void axienet_device_reset(struct net_device *ndev)
+static int axienet_device_reset(struct net_device *ndev)
{
u32 axienet_status;
struct axienet_local *lp = netdev_priv(ndev);
+ int ret;
- __axienet_device_reset(lp);
+ ret = __axienet_device_reset(lp);
+ if (ret)
+ return ret;
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
lp->options |= XAE_OPTION_VLAN;
@@ -491,9 +555,11 @@ static void axienet_device_reset(struct net_device *ndev)
lp->options |= XAE_OPTION_JUMBO;
}
- if (axienet_dma_bd_init(ndev)) {
+ ret = axienet_dma_bd_init(ndev);
+ if (ret) {
netdev_err(ndev, "%s: descriptor allocation failed\n",
__func__);
+ return ret;
}
axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -518,36 +584,54 @@ static void axienet_device_reset(struct net_device *ndev)
axienet_setoptions(ndev, lp->options);
netif_trans_update(ndev);
+
+ return 0;
}
/**
- * axienet_start_xmit_done - Invoked once a transmit is completed by the
- * Axi DMA Tx channel.
+ * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
* @ndev: Pointer to the net_device structure
+ * @first_bd: Index of first descriptor to clean up
+ * @nr_bds: Number of descriptors to clean up, can be -1 if unknown.
+ * @sizep: Pointer to a u32 filled with the total sum of all bytes
+ * in all cleaned-up descriptors. Ignored if NULL.
*
- * This function is invoked from the Axi DMA Tx isr to notify the completion
- * of transmit operation. It clears fields in the corresponding Tx BDs and
- * unmaps the corresponding buffer so that CPU can regain ownership of the
- * buffer. It finally invokes "netif_wake_queue" to restart transmission if
- * required.
+ * Would either be called after a successful transmit operation, or after
+ * there was an error when setting up the chain.
+ * Returns the number of descriptors handled.
*/
-static void axienet_start_xmit_done(struct net_device *ndev)
+static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
+ int nr_bds, u32 *sizep)
{
- u32 size = 0;
- u32 packets = 0;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
- unsigned int status = 0;
-
- cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
- status = cur_p->status;
- while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
- DMA_TO_DEVICE);
- if (cur_p->skb)
+ int max_bds = nr_bds;
+ unsigned int status;
+ dma_addr_t phys;
+ int i;
+
+ if (max_bds == -1)
+ max_bds = lp->tx_bd_num;
+
+ for (i = 0; i < max_bds; i++) {
+ cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
+ status = cur_p->status;
+
+ /* If no number is given, clean up *all* descriptors that have
+ * been completed by the MAC.
+ */
+ if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
+ break;
+
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(ndev->dev.parent, phys,
+ (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+
+ if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
dev_consume_skb_irq(cur_p->skb);
- /*cur_p->phys = 0;*/
+
+ cur_p->cntrl = 0;
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
@@ -555,15 +639,36 @@ static void axienet_start_xmit_done(struct net_device *ndev)
cur_p->status = 0;
cur_p->skb = NULL;
- size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
- packets++;
-
- if (++lp->tx_bd_ci >= lp->tx_bd_num)
- lp->tx_bd_ci = 0;
- cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
- status = cur_p->status;
+ if (sizep)
+ *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
}
+ return i;
+}
+
+/**
+ * axienet_start_xmit_done - Invoked once a transmit is completed by the
+ * Axi DMA Tx channel.
+ * @ndev: Pointer to the net_device structure
+ *
+ * This function is invoked from the Axi DMA Tx isr to notify the completion
+ * of transmit operation. It clears fields in the corresponding Tx BDs and
+ * unmaps the corresponding buffer so that CPU can regain ownership of the
+ * buffer. It finally invokes "netif_wake_queue" to restart transmission if
+ * required.
+ */
+static void axienet_start_xmit_done(struct net_device *ndev)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ u32 packets = 0;
+ u32 size = 0;
+
+ packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);
+
+ lp->tx_bd_ci += packets;
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
+ lp->tx_bd_ci -= lp->tx_bd_num;
+
ndev->stats.tx_packets += packets;
ndev->stats.tx_bytes += size;
@@ -617,9 +722,10 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 csum_start_off;
u32 csum_index_off;
skb_frag_t *frag;
- dma_addr_t tail_p;
+ dma_addr_t tail_p, phys;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
+ u32 orig_tail_ptr = lp->tx_bd_tail;
num_frag = skb_shinfo(skb)->nr_frags;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -655,19 +761,37 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
}
+ phys = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "TX DMA mapping error\n");
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
- cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
for (ii = 0; ii < num_frag; ii++) {
if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
- cur_p->phys = dma_map_single(ndev->dev.parent,
- skb_frag_address(frag),
- skb_frag_size(frag),
- DMA_TO_DEVICE);
+ phys = dma_map_single(ndev->dev.parent,
+ skb_frag_address(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "TX DMA mapping error\n");
+ ndev->stats.tx_dropped++;
+ axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
+ NULL);
+ lp->tx_bd_tail = orig_tail_ptr;
+
+ return NETDEV_TX_OK;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = skb_frag_size(frag);
}
@@ -676,7 +800,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
/* Start the transfer */
- axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+ axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
@@ -706,10 +830,12 @@ static void axienet_recv(struct net_device *ndev)
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ dma_addr_t phys;
+
tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- lp->max_frm_size,
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
DMA_FROM_DEVICE);
skb = cur_p->skb;
@@ -745,9 +871,17 @@ static void axienet_recv(struct net_device *ndev)
if (!new_skb)
return;
- cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
+ phys = dma_map_single(ndev->dev.parent, new_skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "RX DMA mapping error\n");
+ dev_kfree_skb(new_skb);
+ return;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
+
cur_p->cntrl = lp->max_frm_size;
cur_p->status = 0;
cur_p->skb = new_skb;
@@ -761,7 +895,7 @@ static void axienet_recv(struct net_device *ndev)
ndev->stats.rx_bytes += size;
if (tail_p)
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}
/**
@@ -791,7 +925,8 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+ dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
+ (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
(lp->tx_bd_v[lp->tx_bd_ci]).phys);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
@@ -806,7 +941,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
/* Write to the Rx channel control register */
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
- tasklet_schedule(&lp->dma_err_tasklet);
+ schedule_work(&lp->dma_err_task);
axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
}
out:
@@ -840,7 +975,8 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+ dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
+ (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
(lp->rx_bd_v[lp->rx_bd_ci]).phys);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
@@ -855,7 +991,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
/* write to the Rx channel control register */
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
- tasklet_schedule(&lp->dma_err_tasklet);
+ schedule_work(&lp->dma_err_task);
axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
}
out:
@@ -891,7 +1027,7 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
return IRQ_HANDLED;
}
-static void axienet_dma_err_handler(unsigned long data);
+static void axienet_dma_err_handler(struct work_struct *work);
/**
* axienet_open - Driver open routine.
@@ -921,8 +1057,9 @@ static int axienet_open(struct net_device *ndev)
*/
mutex_lock(&lp->mii_bus->mdio_lock);
axienet_mdio_disable(lp);
- axienet_device_reset(ndev);
- ret = axienet_mdio_enable(lp);
+ ret = axienet_device_reset(ndev);
+ if (ret == 0)
+ ret = axienet_mdio_enable(lp);
mutex_unlock(&lp->mii_bus->mdio_lock);
if (ret < 0)
return ret;
@@ -935,9 +1072,8 @@ static int axienet_open(struct net_device *ndev)
phylink_start(lp->phylink);
- /* Enable tasklets for Axi DMA error handling */
- tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
- (unsigned long) lp);
+ /* Enable worker thread for Axi DMA error handling */
+ INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
/* Enable interrupts for Axi DMA Tx */
ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
@@ -966,7 +1102,7 @@ err_rx_irq:
err_tx_irq:
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
- tasklet_kill(&lp->dma_err_tasklet);
+ cancel_work_sync(&lp->dma_err_task);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
}
@@ -1025,7 +1161,7 @@ static int axienet_stop(struct net_device *ndev)
axienet_mdio_enable(lp);
mutex_unlock(&lp->mii_bus->mdio_lock);
- tasklet_kill(&lp->dma_err_tasklet);
+ cancel_work_sync(&lp->dma_err_task);
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
@@ -1083,6 +1219,16 @@ static void axienet_poll_controller(struct net_device *ndev)
}
#endif
+static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phylink_mii_ioctl(lp->phylink, rq, cmd);
+}
+
static const struct net_device_ops axienet_netdev_ops = {
.ndo_open = axienet_open,
.ndo_stop = axienet_stop,
@@ -1090,6 +1236,7 @@ static const struct net_device_ops axienet_netdev_ops = {
.ndo_change_mtu = axienet_change_mtu,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = axienet_ioctl,
.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = axienet_poll_controller,
@@ -1170,10 +1317,6 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
- data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
- data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
- data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
- data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
@@ -1309,27 +1452,6 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EFAULT;
}
- if ((ecoalesce->rx_coalesce_usecs) ||
- (ecoalesce->rx_coalesce_usecs_irq) ||
- (ecoalesce->rx_max_coalesced_frames_irq) ||
- (ecoalesce->tx_coalesce_usecs) ||
- (ecoalesce->tx_coalesce_usecs_irq) ||
- (ecoalesce->tx_max_coalesced_frames_irq) ||
- (ecoalesce->stats_block_coalesce_usecs) ||
- (ecoalesce->use_adaptive_rx_coalesce) ||
- (ecoalesce->use_adaptive_tx_coalesce) ||
- (ecoalesce->pkt_rate_low) ||
- (ecoalesce->rx_coalesce_usecs_low) ||
- (ecoalesce->rx_max_coalesced_frames_low) ||
- (ecoalesce->tx_coalesce_usecs_low) ||
- (ecoalesce->tx_max_coalesced_frames_low) ||
- (ecoalesce->pkt_rate_high) ||
- (ecoalesce->rx_coalesce_usecs_high) ||
- (ecoalesce->rx_max_coalesced_frames_high) ||
- (ecoalesce->tx_coalesce_usecs_high) ||
- (ecoalesce->tx_max_coalesced_frames_high) ||
- (ecoalesce->rate_sample_interval))
- return -EOPNOTSUPP;
if (ecoalesce->rx_max_coalesced_frames)
lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
if (ecoalesce->tx_max_coalesced_frames)
@@ -1357,6 +1479,7 @@ axienet_ethtools_set_link_ksettings(struct net_device *ndev,
}
static const struct ethtool_ops axienet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -1441,6 +1564,22 @@ static void axienet_mac_an_restart(struct phylink_config *config)
static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
+ /* nothing meaningful to do */
+}
+
+static void axienet_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ /* nothing meaningful to do */
+}
+
+static void axienet_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
struct net_device *ndev = to_net_dev(config->dev);
struct axienet_local *lp = netdev_priv(ndev);
u32 emmc_reg, fcc_reg;
@@ -1448,7 +1587,7 @@ static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
- switch (state->speed) {
+ switch (speed) {
case SPEED_1000:
emmc_reg |= XAE_EMMC_LINKSPD_1000;
break;
@@ -1467,32 +1606,17 @@ static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
- if (state->pause & MLO_PAUSE_TX)
+ if (tx_pause)
fcc_reg |= XAE_FCC_FCTX_MASK;
else
fcc_reg &= ~XAE_FCC_FCTX_MASK;
- if (state->pause & MLO_PAUSE_RX)
+ if (rx_pause)
fcc_reg |= XAE_FCC_FCRX_MASK;
else
fcc_reg &= ~XAE_FCC_FCRX_MASK;
axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}
-static void axienet_mac_link_down(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface)
-{
- /* nothing meaningful to do */
-}
-
-static void axienet_mac_link_up(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phy)
-{
- /* nothing meaningful to do */
-}
-
static const struct phylink_mac_ops axienet_phylink_ops = {
.validate = axienet_validate,
.mac_pcs_get_state = axienet_mac_pcs_get_state,
@@ -1503,17 +1627,18 @@ static const struct phylink_mac_ops axienet_phylink_ops = {
};
/**
- * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
- * @data: Data passed
+ * axienet_dma_err_handler - Work queue task for Axi DMA Error
+ * @work: pointer to work_struct
*
* Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
* Tx/Rx BDs.
*/
-static void axienet_dma_err_handler(unsigned long data)
+static void axienet_dma_err_handler(struct work_struct *work)
{
u32 axienet_status;
u32 cr, i;
- struct axienet_local *lp = (struct axienet_local *) data;
+ struct axienet_local *lp = container_of(work, struct axienet_local,
+ dma_err_task);
struct net_device *ndev = lp->ndev;
struct axidma_bd *cur_p;
@@ -1533,14 +1658,18 @@ static void axienet_dma_err_handler(unsigned long data)
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
- if (cur_p->phys)
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ if (cur_p->cntrl) {
+ dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
+
+ dma_unmap_single(ndev->dev.parent, addr,
(cur_p->cntrl &
XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
+ }
if (cur_p->skb)
dev_kfree_skb_irq(cur_p->skb);
cur_p->phys = 0;
+ cur_p->phys_msb = 0;
cur_p->cntrl = 0;
cur_p->status = 0;
cur_p->app0 = 0;
@@ -1594,18 +1723,18 @@ static void axienet_dma_err_handler(unsigned long data)
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
- axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
* tail pointer register that the Tx channel will start transmitting
*/
- axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
@@ -1651,6 +1780,7 @@ static int axienet_probe(struct platform_device *pdev)
struct net_device *ndev;
const void *mac_addr;
struct resource *ethres;
+ int addr_width = 32;
u32 value;
ndev = alloc_etherdev(sizeof(*lp));
@@ -1781,7 +1911,7 @@ static int axienet_probe(struct platform_device *pdev)
lp->rx_irq = irq_of_parse_and_map(np, 1);
lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
- lp->eth_irq = platform_get_irq(pdev, 0);
+ lp->eth_irq = platform_get_irq_optional(pdev, 0);
} else {
/* Check for these resources directly on the Ethernet node. */
struct resource *res = platform_get_resource(pdev,
@@ -1789,7 +1919,7 @@ static int axienet_probe(struct platform_device *pdev)
lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
lp->rx_irq = platform_get_irq(pdev, 1);
lp->tx_irq = platform_get_irq(pdev, 0);
- lp->eth_irq = platform_get_irq(pdev, 2);
+ lp->eth_irq = platform_get_irq_optional(pdev, 2);
}
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
@@ -1802,6 +1932,36 @@ static int axienet_probe(struct platform_device *pdev)
goto free_netdev;
}
+ /* Autodetect the need for 64-bit DMA pointers.
+ * When the IP is configured for a bus width bigger than 32 bits,
+ * writing the MSB registers is mandatory, even if they are all 0.
+ * We can detect this case by writing all 1's to one such register
+ * and see if that sticks: when the IP is configured for 32 bits
+ * only, those registers are RES0.
+ * Those MSB registers were introduced in IP v7.1, which we check first.
+ */
+ if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
+ void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
+
+ iowrite32(0x0, desc);
+ if (ioread32(desc) == 0) { /* sanity check */
+ iowrite32(0xffffffff, desc);
+ if (ioread32(desc) > 0) {
+ lp->features |= XAE_FEATURE_DMA_64BIT;
+ addr_width = 64;
+ dev_info(&pdev->dev,
+ "autodetected 64-bit DMA range\n");
+ }
+ iowrite32(0x0, desc);
+ }
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
+ if (ret) {
+ dev_err(&pdev->dev, "No suitable DMA available\n");
+ goto free_netdev;
+ }
+
/* Check for Ethernet core IRQ (optional) */
if (lp->eth_irq <= 0)
dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");