 drivers/net/ethernet/cadence/macb_main.c | 758 ++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 598 insertions(+), 160 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 72a67f74b97b..e84afcf1ecb5 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -194,17 +194,17 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
return index & (bp->rx_ring_size - 1);
}
-static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
- index = macb_rx_ring_wrap(bp, index);
- index = macb_adj_dma_desc_idx(bp, index);
- return &bp->rx_ring[index];
+ index = macb_rx_ring_wrap(queue->bp, index);
+ index = macb_adj_dma_desc_idx(queue->bp, index);
+ return &queue->rx_ring[index];
}
-static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
- return bp->rx_buffers + bp->rx_buffer_size *
- macb_rx_ring_wrap(bp, index);
+ return queue->rx_buffers + queue->bp->rx_buffer_size *
+ macb_rx_ring_wrap(queue->bp, index);
}
/* I/O accessors */
@@ -759,7 +759,9 @@ static void macb_tx_error_task(struct work_struct *work)
macb_tx_ring_wrap(bp, tail),
skb->data);
bp->dev->stats.tx_packets++;
+ queue->stats.tx_packets++;
bp->dev->stats.tx_bytes += skb->len;
+ queue->stats.tx_bytes += skb->len;
}
} else {
/* "Buffers exhausted mid-frame" errors may only happen
@@ -859,7 +861,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
macb_tx_ring_wrap(bp, tail),
skb->data);
bp->dev->stats.tx_packets++;
+ queue->stats.tx_packets++;
bp->dev->stats.tx_bytes += skb->len;
+ queue->stats.tx_bytes += skb->len;
}
/* Now we can safely release resources */
@@ -881,24 +885,25 @@ static void macb_tx_interrupt(struct macb_queue *queue)
netif_wake_subqueue(bp->dev, queue_index);
}
-static void gem_rx_refill(struct macb *bp)
+static void gem_rx_refill(struct macb_queue *queue)
{
unsigned int entry;
struct sk_buff *skb;
dma_addr_t paddr;
+ struct macb *bp = queue->bp;
struct macb_dma_desc *desc;
- while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
- bp->rx_ring_size) > 0) {
- entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);
+ while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
+ bp->rx_ring_size) > 0) {
+ entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
/* Make hw descriptor updates visible to CPU */
rmb();
- bp->rx_prepared_head++;
- desc = macb_rx_desc(bp, entry);
+ queue->rx_prepared_head++;
+ desc = macb_rx_desc(queue, entry);
- if (!bp->rx_skbuff[entry]) {
+ if (!queue->rx_skbuff[entry]) {
/* allocate sk_buff for this free entry in ring */
skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
if (unlikely(!skb)) {
@@ -916,7 +921,7 @@ static void gem_rx_refill(struct macb *bp)
break;
}
- bp->rx_skbuff[entry] = skb;
+ queue->rx_skbuff[entry] = skb;
if (entry == bp->rx_ring_size - 1)
paddr |= MACB_BIT(RX_WRAP);
@@ -934,18 +939,18 @@ static void gem_rx_refill(struct macb *bp)
/* Make descriptor updates visible to hardware */
wmb();
- netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
- bp->rx_prepared_head, bp->rx_tail);
+ netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
+ queue, queue->rx_prepared_head, queue->rx_tail);
}
/* Mark DMA descriptors from begin up to and not including end as unused */
-static void discard_partial_frame(struct macb *bp, unsigned int begin,
+static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
unsigned int end)
{
unsigned int frag;
for (frag = begin; frag != end; frag++) {
- struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+ struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
desc->addr &= ~MACB_BIT(RX_USED);
}
@@ -959,8 +964,9 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
*/
}
-static int gem_rx(struct macb *bp, int budget)
+static int gem_rx(struct macb_queue *queue, int budget)
{
+ struct macb *bp = queue->bp;
unsigned int len;
unsigned int entry;
struct sk_buff *skb;
@@ -972,8 +978,8 @@ static int gem_rx(struct macb *bp, int budget)
dma_addr_t addr;
bool rxused;
- entry = macb_rx_ring_wrap(bp, bp->rx_tail);
- desc = macb_rx_desc(bp, entry);
+ entry = macb_rx_ring_wrap(bp, queue->rx_tail);
+ desc = macb_rx_desc(queue, entry);
/* Make hw descriptor updates visible to CPU */
rmb();
@@ -985,24 +991,26 @@ static int gem_rx(struct macb *bp, int budget)
if (!rxused)
break;
- bp->rx_tail++;
+ queue->rx_tail++;
count++;
if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
netdev_err(bp->dev,
"not whole frame pointed by descriptor\n");
bp->dev->stats.rx_dropped++;
+ queue->stats.rx_dropped++;
break;
}
- skb = bp->rx_skbuff[entry];
+ skb = queue->rx_skbuff[entry];
if (unlikely(!skb)) {
netdev_err(bp->dev,
"inconsistent Rx descriptor chain\n");
bp->dev->stats.rx_dropped++;
+ queue->stats.rx_dropped++;
break;
}
/* now everything is ready for receiving packet */
- bp->rx_skbuff[entry] = NULL;
+ queue->rx_skbuff[entry] = NULL;
len = ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
@@ -1019,7 +1027,9 @@ static int gem_rx(struct macb *bp, int budget)
skb->ip_summed = CHECKSUM_UNNECESSARY;
bp->dev->stats.rx_packets++;
+ queue->stats.rx_packets++;
bp->dev->stats.rx_bytes += skb->len;
+ queue->stats.rx_bytes += skb->len;
gem_ptp_do_rxstamp(bp, skb, desc);
@@ -1035,12 +1045,12 @@ static int gem_rx(struct macb *bp, int budget)
netif_receive_skb(skb);
}
- gem_rx_refill(bp);
+ gem_rx_refill(queue);
return count;
}
-static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
+static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
unsigned int last_frag)
{
unsigned int len;
@@ -1048,8 +1058,9 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
unsigned int offset;
struct sk_buff *skb;
struct macb_dma_desc *desc;
+ struct macb *bp = queue->bp;
- desc = macb_rx_desc(bp, last_frag);
+ desc = macb_rx_desc(queue, last_frag);
len = desc->ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
@@ -1068,7 +1079,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
if (!skb) {
bp->dev->stats.rx_dropped++;
for (frag = first_frag; ; frag++) {
- desc = macb_rx_desc(bp, frag);
+ desc = macb_rx_desc(queue, frag);
desc->addr &= ~MACB_BIT(RX_USED);
if (frag == last_frag)
break;
@@ -1096,10 +1107,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
frag_len = len - offset;
}
skb_copy_to_linear_data_offset(skb, offset,
- macb_rx_buffer(bp, frag),
+ macb_rx_buffer(queue, frag),
frag_len);
offset += bp->rx_buffer_size;
- desc = macb_rx_desc(bp, frag);
+ desc = macb_rx_desc(queue, frag);
desc->addr &= ~MACB_BIT(RX_USED);
if (frag == last_frag)
@@ -1121,32 +1132,34 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
return 0;
}
-static inline void macb_init_rx_ring(struct macb *bp)
+static inline void macb_init_rx_ring(struct macb_queue *queue)
{
+ struct macb *bp = queue->bp;
dma_addr_t addr;
struct macb_dma_desc *desc = NULL;
int i;
- addr = bp->rx_buffers_dma;
+ addr = queue->rx_buffers_dma;
for (i = 0; i < bp->rx_ring_size; i++) {
- desc = macb_rx_desc(bp, i);
+ desc = macb_rx_desc(queue, i);
macb_set_addr(bp, desc, addr);
desc->ctrl = 0;
addr += bp->rx_buffer_size;
}
desc->addr |= MACB_BIT(RX_WRAP);
- bp->rx_tail = 0;
+ queue->rx_tail = 0;
}
-static int macb_rx(struct macb *bp, int budget)
+static int macb_rx(struct macb_queue *queue, int budget)
{
+ struct macb *bp = queue->bp;
bool reset_rx_queue = false;
int received = 0;
unsigned int tail;
int first_frag = -1;
- for (tail = bp->rx_tail; budget > 0; tail++) {
- struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
+ for (tail = queue->rx_tail; budget > 0; tail++) {
+ struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
u32 ctrl;
/* Make hw descriptor updates visible to CPU */
@@ -1159,7 +1172,7 @@ static int macb_rx(struct macb *bp, int budget)
if (ctrl & MACB_BIT(RX_SOF)) {
if (first_frag != -1)
- discard_partial_frame(bp, first_frag, tail);
+ discard_partial_frame(queue, first_frag, tail);
first_frag = tail;
}
@@ -1171,7 +1184,7 @@ static int macb_rx(struct macb *bp, int budget)
continue;
}
- dropped = macb_rx_frame(bp, first_frag, tail);
+ dropped = macb_rx_frame(queue, first_frag, tail);
first_frag = -1;
if (unlikely(dropped < 0)) {
reset_rx_queue = true;
@@ -1195,8 +1208,8 @@ static int macb_rx(struct macb *bp, int budget)
ctrl = macb_readl(bp, NCR);
macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
- macb_init_rx_ring(bp);
- macb_writel(bp, RBQP, bp->rx_ring_dma);
+ macb_init_rx_ring(queue);
+ queue_writel(queue, RBQP, queue->rx_ring_dma);
macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
@@ -1205,16 +1218,17 @@ static int macb_rx(struct macb *bp, int budget)
}
if (first_frag != -1)
- bp->rx_tail = first_frag;
+ queue->rx_tail = first_frag;
else
- bp->rx_tail = tail;
+ queue->rx_tail = tail;
return received;
}
static int macb_poll(struct napi_struct *napi, int budget)
{
- struct macb *bp = container_of(napi, struct macb, napi);
+ struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
+ struct macb *bp = queue->bp;
int work_done;
u32 status;
@@ -1224,7 +1238,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
(unsigned long)status, budget);
- work_done = bp->macbgem_ops.mog_rx(bp, budget);
+ work_done = bp->macbgem_ops.mog_rx(queue, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
@@ -1232,10 +1246,10 @@ static int macb_poll(struct napi_struct *napi, int budget)
status = macb_readl(bp, RSR);
if (status) {
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- macb_writel(bp, ISR, MACB_BIT(RCOMP));
+ queue_writel(queue, ISR, MACB_BIT(RCOMP));
napi_reschedule(napi);
} else {
- macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ queue_writel(queue, IER, MACB_RX_INT_FLAGS);
}
}
@@ -1244,6 +1258,57 @@ static int macb_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static void macb_hresp_error_task(unsigned long data)
+{
+ struct macb *bp = (struct macb *)data;
+ struct net_device *dev = bp->dev;
+ struct macb_queue *queue = bp->queues;
+ unsigned int q;
+ u32 ctrl;
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
+ ctrl = macb_readl(bp, NCR);
+ ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
+ macb_writel(bp, NCR, ctrl);
+
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+
+ bp->macbgem_ops.mog_init_rings(bp);
+
+ /* Initialize TX and RX buffers */
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, RBQPH,
+ upper_32_bits(queue->rx_ring_dma));
+#endif
+ queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, TBQPH,
+ upper_32_bits(queue->tx_ring_dma));
+#endif
+
+ /* Enable interrupts */
+ queue_writel(queue, IER,
+ MACB_RX_INT_FLAGS |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
+
+ ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
+ macb_writel(bp, NCR, ctrl);
+
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+}
+
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
struct macb_queue *queue = dev_id;
@@ -1282,9 +1347,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
- if (napi_schedule_prep(&bp->napi)) {
+ if (napi_schedule_prep(&queue->napi)) {
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
- __napi_schedule(&bp->napi);
+ __napi_schedule(&queue->napi);
}
}
@@ -1333,10 +1398,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_BIT(HRESP)) {
- /* TODO: Reset the hardware, and maybe move the
- * netdev_err to a lower-priority context as well
- * (work queue?)
- */
+ tasklet_schedule(&bp->hresp_err_tasklet);
netdev_err(dev, "DMA bus error: HRESP not OK\n");
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1708,38 +1770,44 @@ static void gem_free_rx_buffers(struct macb *bp)
{
struct sk_buff *skb;
struct macb_dma_desc *desc;
+ struct macb_queue *queue;
dma_addr_t addr;
+ unsigned int q;
int i;
- if (!bp->rx_skbuff)
- return;
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ if (!queue->rx_skbuff)
+ continue;
- for (i = 0; i < bp->rx_ring_size; i++) {
- skb = bp->rx_skbuff[i];
+ for (i = 0; i < bp->rx_ring_size; i++) {
+ skb = queue->rx_skbuff[i];
- if (!skb)
- continue;
+ if (!skb)
+ continue;
- desc = macb_rx_desc(bp, i);
- addr = macb_get_addr(bp, desc);
+ desc = macb_rx_desc(queue, i);
+ addr = macb_get_addr(bp, desc);
- dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- skb = NULL;
- }
+ dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ }
- kfree(bp->rx_skbuff);
- bp->rx_skbuff = NULL;
+ kfree(queue->rx_skbuff);
+ queue->rx_skbuff = NULL;
+ }
}
static void macb_free_rx_buffers(struct macb *bp)
{
- if (bp->rx_buffers) {
+ struct macb_queue *queue = &bp->queues[0];
+
+ if (queue->rx_buffers) {
dma_free_coherent(&bp->pdev->dev,
bp->rx_ring_size * bp->rx_buffer_size,
- bp->rx_buffers, bp->rx_buffers_dma);
- bp->rx_buffers = NULL;
+ queue->rx_buffers, queue->rx_buffers_dma);
+ queue->rx_buffers = NULL;
}
}
@@ -1748,11 +1816,12 @@ static void macb_free_consistent(struct macb *bp)
struct macb_queue *queue;
unsigned int q;
+ queue = &bp->queues[0];
bp->macbgem_ops.mog_free_rx_buffers(bp);
- if (bp->rx_ring) {
+ if (queue->rx_ring) {
dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
- bp->rx_ring, bp->rx_ring_dma);
- bp->rx_ring = NULL;
+ queue->rx_ring, queue->rx_ring_dma);
+ queue->rx_ring = NULL;
}
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
@@ -1768,32 +1837,37 @@ static void macb_free_consistent(struct macb *bp)
static int gem_alloc_rx_buffers(struct macb *bp)
{
+ struct macb_queue *queue;
+ unsigned int q;
int size;
- size = bp->rx_ring_size * sizeof(struct sk_buff *);
- bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
- if (!bp->rx_skbuff)
- return -ENOMEM;
- else
- netdev_dbg(bp->dev,
- "Allocated %d RX struct sk_buff entries at %p\n",
- bp->rx_ring_size, bp->rx_skbuff);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ size = bp->rx_ring_size * sizeof(struct sk_buff *);
+ queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
+ if (!queue->rx_skbuff)
+ return -ENOMEM;
+ else
+ netdev_dbg(bp->dev,
+ "Allocated %d RX struct sk_buff entries at %p\n",
+ bp->rx_ring_size, queue->rx_skbuff);
+ }
return 0;
}
static int macb_alloc_rx_buffers(struct macb *bp)
{
+ struct macb_queue *queue = &bp->queues[0];
int size;
size = bp->rx_ring_size * bp->rx_buffer_size;
- bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
- &bp->rx_buffers_dma, GFP_KERNEL);
- if (!bp->rx_buffers)
+ queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
+ &queue->rx_buffers_dma, GFP_KERNEL);
+ if (!queue->rx_buffers)
return -ENOMEM;
netdev_dbg(bp->dev,
"Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+ size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
return 0;
}
@@ -1819,17 +1893,16 @@ static int macb_alloc_consistent(struct macb *bp)
queue->tx_skb = kmalloc(size, GFP_KERNEL);
if (!queue->tx_skb)
goto out_err;
- }
-
- size = RX_RING_BYTES(bp);
- bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &bp->rx_ring_dma, GFP_KERNEL);
- if (!bp->rx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
+ size = RX_RING_BYTES(bp);
+ queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+ &queue->rx_ring_dma, GFP_KERNEL);
+ if (!queue->rx_ring)
+ goto out_err;
+ netdev_dbg(bp->dev,
+ "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
+ }
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
@@ -1856,12 +1929,13 @@ static void gem_init_rings(struct macb *bp)
desc->ctrl |= MACB_BIT(TX_WRAP);
queue->tx_head = 0;
queue->tx_tail = 0;
- }
- bp->rx_tail = 0;
- bp->rx_prepared_head = 0;
+ queue->rx_tail = 0;
+ queue->rx_prepared_head = 0;
+
+ gem_rx_refill(queue);
+ }
- gem_rx_refill(bp);
}
static void macb_init_rings(struct macb *bp)
@@ -1869,7 +1943,7 @@ static void macb_init_rings(struct macb *bp)
int i;
struct macb_dma_desc *desc = NULL;
- macb_init_rx_ring(bp);
+ macb_init_rx_ring(&bp->queues[0]);
for (i = 0; i < bp->tx_ring_size; i++) {
desc = macb_tx_desc(&bp->queues[0], i);
@@ -1978,11 +2052,20 @@ static u32 macb_dbw(struct macb *bp)
*/
static void macb_configure_dma(struct macb *bp)
{
+ struct macb_queue *queue;
+ u32 buffer_size;
+ unsigned int q;
u32 dmacfg;
+ buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
if (macb_is_gem(bp)) {
dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
- dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ if (q)
+ queue_writel(queue, RBQS, buffer_size);
+ else
+ dmacfg |= GEM_BF(RXBS, buffer_size);
+ }
if (bp->dma_burst_length)
dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
@@ -2051,12 +2134,12 @@ static void macb_init_hw(struct macb *bp)
macb_configure_dma(bp);
/* Initialize TX and RX buffers */
- macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
#endif
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (bp->hw_dma_cap & HW_DMA_CAP_64B)
@@ -2197,6 +2280,8 @@ static int macb_open(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
+ struct macb_queue *queue;
+ unsigned int q;
int err;
netdev_dbg(bp->dev, "open\n");
@@ -2218,11 +2303,12 @@ static int macb_open(struct net_device *dev)
return err;
}
- napi_enable(&bp->napi);
-
bp->macbgem_ops.mog_init_rings(bp);
macb_init_hw(bp);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ napi_enable(&queue->napi);
+
/* schedule a link state check */
phy_start(dev->phydev);
@@ -2237,10 +2323,14 @@ static int macb_open(struct net_device *dev)
static int macb_close(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue;
unsigned long flags;
+ unsigned int q;
netif_tx_stop_all_queues(dev);
- napi_disable(&bp->napi);
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ napi_disable(&queue->napi);
if (dev->phydev)
phy_stop(dev->phydev);
@@ -2270,7 +2360,10 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
static void gem_update_stats(struct macb *bp)
{
- unsigned int i;
+ struct macb_queue *queue;
+ unsigned int i, q, idx;
+ unsigned long *stat;
+
u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
@@ -2287,6 +2380,11 @@ static void gem_update_stats(struct macb *bp)
*(++p) += val;
}
}
+
+ idx = GEM_STATS_LEN;
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
+ bp->ethtool_stats[idx++] = *stat;
}
static struct net_device_stats *gem_get_stats(struct macb *bp)
@@ -2334,14 +2432,17 @@ static void gem_get_ethtool_stats(struct net_device *dev,
bp = netdev_priv(dev);
gem_update_stats(bp);
- memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
+ memcpy(data, &bp->ethtool_stats, sizeof(u64)
+ * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
}
static int gem_get_sset_count(struct net_device *dev, int sset)
{
+ struct macb *bp = netdev_priv(dev);
+
switch (sset) {
case ETH_SS_STATS:
- return GEM_STATS_LEN;
+ return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -2349,13 +2450,25 @@ static int gem_get_sset_count(struct net_device *dev, int sset)
static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
+ char stat_string[ETH_GSTRING_LEN];
+ struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue;
unsigned int i;
+ unsigned int q;
switch (sset) {
case ETH_SS_STATS:
for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
memcpy(p, gem_statistics[i].stat_string,
ETH_GSTRING_LEN);
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
+ snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
+ q, queue_statistics[i].stat_string);
+ memcpy(p, stat_string, ETH_GSTRING_LEN);
+ }
+ }
break;
}
}
@@ -2603,6 +2716,307 @@ static int macb_get_ts_info(struct net_device *netdev,
return ethtool_op_get_ts_info(netdev, info);
}
+static void gem_enable_flow_filters(struct macb *bp, bool enable)
+{
+ struct ethtool_rx_fs_item *item;
+ u32 t2_scr;
+ int num_t2_scr;
+
+ num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
+
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ struct ethtool_rx_flow_spec *fs = &item->fs;
+ struct ethtool_tcpip4_spec *tp4sp_m;
+
+ if (fs->location >= num_t2_scr)
+ continue;
+
+ t2_scr = gem_readl_n(bp, SCRT2, fs->location);
+
+ /* enable/disable screener regs for the flow entry */
+ t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
+
+ /* only enable fields with no masking */
+ tp4sp_m = &(fs->m_u.tcp_ip4_spec);
+
+ if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
+ t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
+ else
+ t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
+
+ if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
+ t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
+ else
+ t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
+
+ if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
+ t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
+ else
+ t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
+
+ gem_writel_n(bp, SCRT2, fs->location, t2_scr);
+ }
+}
+
+static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
+ uint16_t index = fs->location;
+ u32 w0, w1, t2_scr;
+ bool cmp_a = false;
+ bool cmp_b = false;
+ bool cmp_c = false;
+
+ tp4sp_v = &(fs->h_u.tcp_ip4_spec);
+ tp4sp_m = &(fs->m_u.tcp_ip4_spec);
+
+ /* ignore field if any masking set */
+ if (tp4sp_m->ip4src == 0xFFFFFFFF) {
+ /* 1st compare reg - IP source address */
+ w0 = 0;
+ w1 = 0;
+ w0 = tp4sp_v->ip4src;
+ w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+ w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
+ w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
+ gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
+ gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
+ cmp_a = true;
+ }
+
+ /* ignore field if any masking set */
+ if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
+ /* 2nd compare reg - IP destination address */
+ w0 = 0;
+ w1 = 0;
+ w0 = tp4sp_v->ip4dst;
+ w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+ w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
+ w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
+ gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
+ gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
+ cmp_b = true;
+ }
+
+ /* ignore both port fields if masking set in both */
+ if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
+ /* 3rd compare reg - source port, destination port */
+ w0 = 0;
+ w1 = 0;
+ w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
+ if (tp4sp_m->psrc == tp4sp_m->pdst) {
+ w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
+ w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
+ w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+ w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
+ } else {
+ /* only one port definition */
+ w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
+ w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
+ if (tp4sp_m->psrc == 0xFFFF) { /* src port */
+ w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
+ w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
+ } else { /* dst port */
+ w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
+ w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
+ }
+ }
+ gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
+ gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
+ cmp_c = true;
+ }
+
+ t2_scr = 0;
+ t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
+ t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
+ if (cmp_a)
+ t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
+ if (cmp_b)
+ t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
+ if (cmp_c)
+ t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
+ gem_writel_n(bp, SCRT2, index, t2_scr);
+}
+
+static int gem_add_flow_filter(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct macb *bp = netdev_priv(netdev);
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ struct ethtool_rx_fs_item *item, *newfs;
+ unsigned long flags;
+ int ret = -EINVAL;
+ bool added = false;
+
+ newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
+ if (newfs == NULL)
+ return -ENOMEM;
+ memcpy(&newfs->fs, fs, sizeof(newfs->fs));
+
+ netdev_dbg(netdev,
+ "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
+ fs->flow_type, (int)fs->ring_cookie, fs->location,
+ htonl(fs->h_u.tcp_ip4_spec.ip4src),
+ htonl(fs->h_u.tcp_ip4_spec.ip4dst),
+ htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
+
+ spin_lock_irqsave(&bp->rx_fs_lock, flags);
+
+ /* find correct place to add in list */
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ if (item->fs.location > newfs->fs.location) {
+ list_add_tail(&newfs->list, &item->list);
+ added = true;
+ break;
+ } else if (item->fs.location == fs->location) {
+ netdev_err(netdev, "Rule not added: location %d not free!\n",
+ fs->location);
+ ret = -EBUSY;
+ goto err;
+ }
+ }
+ if (!added)
+ list_add_tail(&newfs->list, &bp->rx_fs_list.list);
+
+ gem_prog_cmp_regs(bp, fs);
+ bp->rx_fs_list.count++;
+ /* enable filtering if NTUPLE on */
+ if (netdev->features & NETIF_F_NTUPLE)
+ gem_enable_flow_filters(bp, 1);
+
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ return 0;
+
+err:
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ kfree(newfs);
+ return ret;
+}
+
+static int gem_del_flow_filter(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct macb *bp = netdev_priv(netdev);
+ struct ethtool_rx_fs_item *item;
+ struct ethtool_rx_flow_spec *fs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->rx_fs_lock, flags);
+
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ if (item->fs.location == cmd->fs.location) {
+ /* disable screener regs for the flow entry */
+ fs = &(item->fs);
+ netdev_dbg(netdev,
+ "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
+ fs->flow_type, (int)fs->ring_cookie, fs->location,
+ htonl(fs->h_u.tcp_ip4_spec.ip4src),
+ htonl(fs->h_u.tcp_ip4_spec.ip4dst),
+ htons(fs->h_u.tcp_ip4_spec.psrc),
+ htons(fs->h_u.tcp_ip4_spec.pdst));
+
+ gem_writel_n(bp, SCRT2, fs->location, 0);
+
+ list_del(&item->list);
+ bp->rx_fs_list.count--;
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ kfree(item);
+ return 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ return -EINVAL;
+}
+
+static int gem_get_flow_entry(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct macb *bp = netdev_priv(netdev);
+ struct ethtool_rx_fs_item *item;
+
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ if (item->fs.location == cmd->fs.location) {
+ memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int gem_get_all_flow_entries(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct macb *bp = netdev_priv(netdev);
+ struct ethtool_rx_fs_item *item;
+ uint32_t cnt = 0;
+
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[cnt] = item->fs.location;
+ cnt++;
+ }
+ cmd->data = bp->max_tuples;
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct macb *bp = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = bp->num_queues;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = bp->rx_fs_list.count;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = gem_get_flow_entry(netdev, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
+ break;
+ default:
+ netdev_err(netdev,
+ "Command parameter %d is not supported\n", cmd->cmd);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct macb *bp = netdev_priv(netdev);
+ int ret;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ if ((cmd->fs.location >= bp->max_tuples)
+ || (cmd->fs.ring_cookie >= bp->num_queues)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = gem_add_flow_filter(netdev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = gem_del_flow_filter(netdev, cmd);
+ break;
+ default:
+ netdev_err(netdev,
+ "Command parameter %d is not supported\n", cmd->cmd);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
static const struct ethtool_ops macb_ethtool_ops = {
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
@@ -2628,6 +3042,8 @@ static const struct ethtool_ops gem_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
.set_ringparam = macb_set_ringparam,
+ .get_rxnfc = gem_get_rxnfc,
+ .set_rxnfc = gem_set_rxnfc,
};
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2685,6 +3101,12 @@ static int macb_set_features(struct net_device *netdev,
gem_writel(bp, NCFGR, netcfg);
}
+ /* RX Flow Filters */
+ if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
+ bool turn_on = features & NETIF_F_NTUPLE;
+
+ gem_enable_flow_filters(bp, turn_on);
+ }
return 0;
}
@@ -2850,7 +3272,7 @@ static int macb_init(struct platform_device *pdev)
struct macb *bp = netdev_priv(dev);
struct macb_queue *queue;
int err;
- u32 val;
+ u32 val, reg;
bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
@@ -2865,15 +3287,20 @@ static int macb_init(struct platform_device *pdev)
queue = &bp->queues[q];
queue->bp = bp;
+ netif_napi_add(dev, &queue->napi, macb_poll, 64);
if (hw_q) {
queue->ISR = GEM_ISR(hw_q - 1);
queue->IER = GEM_IER(hw_q - 1);
queue->IDR = GEM_IDR(hw_q - 1);
queue->IMR = GEM_IMR(hw_q - 1);
queue->TBQP = GEM_TBQP(hw_q - 1);
+ queue->RBQP = GEM_RBQP(hw_q - 1);
+ queue->RBQS = GEM_RBQS(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
queue->TBQPH = GEM_TBQPH(hw_q - 1);
+ queue->RBQPH = GEM_RBQPH(hw_q - 1);
+ }
#endif
} else {
/* queue0 uses legacy registers */
@@ -2882,9 +3309,12 @@ static int macb_init(struct platform_device *pdev)
queue->IDR = MACB_IDR;
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
+ queue->RBQP = MACB_RBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
queue->TBQPH = MACB_TBQPH;
+ queue->RBQPH = MACB_RBQPH;
+ }
#endif
}
@@ -2908,7 +3338,6 @@ static int macb_init(struct platform_device *pdev)
}
dev->netdev_ops = &macb_netdev_ops;
- netif_napi_add(dev, &bp->napi, macb_poll, 64);
/* setup appropriated routines according to adapter type */
if (macb_is_gem(bp)) {
@@ -2941,6 +3370,30 @@ static int macb_init(struct platform_device *pdev)
dev->hw_features &= ~NETIF_F_SG;
dev->features = dev->hw_features;
+ /* Check RX Flow Filters support.
+ * Max Rx flows set by availability of screeners & compare regs:
+ * each 4-tuple define requires 1 T2 screener reg + 3 compare regs
+ */
+ reg = gem_readl(bp, DCFG8);
+ bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
+ GEM_BFEXT(T2SCR, reg));
+ if (bp->max_tuples > 0) {
+ /* also needs one ethtype match to check IPv4 */
+ if (GEM_BFEXT(SCR2ETH, reg) > 0) {
+ /* program this reg now */
+ reg = 0;
+ reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
+ gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
+ /* Filtering is supported in hw but don't enable it in kernel now */
+ dev->hw_features |= NETIF_F_NTUPLE;
+ /* init Rx flow definitions */
+ INIT_LIST_HEAD(&bp->rx_fs_list.list);
+ bp->rx_fs_list.count = 0;
+ spin_lock_init(&bp->rx_fs_lock);
+ } else
+ bp->max_tuples = 0;
+ }
+
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
val = 0;
if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
@@ -2977,34 +3430,35 @@ static int macb_init(struct platform_device *pdev)
static int at91ether_start(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ struct macb_queue *q = &lp->queues[0];
struct macb_dma_desc *desc;
dma_addr_t addr;
u32 ctl;
int i;
- lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
+ q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
(AT91ETHER_MAX_RX_DESCR *
macb_dma_desc_get_size(lp)),
- &lp->rx_ring_dma, GFP_KERNEL);
- if (!lp->rx_ring)
+ &q->rx_ring_dma, GFP_KERNEL);
+ if (!q->rx_ring)
return -ENOMEM;
- lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
+ q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
AT91ETHER_MAX_RBUFF_SZ,
- &lp->rx_buffers_dma, GFP_KERNEL);
- if (!lp->rx_buffers) {
+ &q->rx_buffers_dma, GFP_KERNEL);
+ if (!q->rx_buffers) {
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
macb_dma_desc_get_size(lp),
- lp->rx_ring, lp->rx_ring_dma);
- lp->rx_ring = NULL;
+ q->rx_ring, q->rx_ring_dma);
+ q->rx_ring = NULL;
return -ENOMEM;
}
- addr = lp->rx_buffers_dma;
+ addr = q->rx_buffers_dma;
for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
- desc = macb_rx_desc(lp, i);
+ desc = macb_rx_desc(q, i);
macb_set_addr(lp, desc, addr);
desc->ctrl = 0;
addr += AT91ETHER_MAX_RBUFF_SZ;
@@ -3014,10 +3468,10 @@ static int at91ether_start(struct net_device *dev)
desc->addr |= MACB_BIT(RX_WRAP);
/* Reset buffer index */
- lp->rx_tail = 0;
+ q->rx_tail = 0;
/* Program address of descriptor list in Rx Buffer Queue register */
- macb_writel(lp, RBQP, lp->rx_ring_dma);
+ macb_writel(lp, RBQP, q->rx_ring_dma);
/* Enable Receive and Transmit */
ctl = macb_readl(lp, NCR);
@@ -3064,6 +3518,7 @@ static int at91ether_open(struct net_device *dev)
static int at91ether_close(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ struct macb_queue *q = &lp->queues[0];
u32 ctl;
/* Disable Receiver and Transmitter */
@@ -3084,13 +3539,13 @@ static int at91ether_close(struct net_device *dev)
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
macb_dma_desc_get_size(lp),
- lp->rx_ring, lp->rx_ring_dma);
- lp->rx_ring = NULL;
+ q->rx_ring, q->rx_ring_dma);
+ q->rx_ring = NULL;
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
- lp->rx_buffers, lp->rx_buffers_dma);
- lp->rx_buffers = NULL;
+ q->rx_buffers, q->rx_buffers_dma);
+ q->rx_buffers = NULL;
return 0;
}
@@ -3134,14 +3589,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void at91ether_rx(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ struct macb_queue *q = &lp->queues[0];
struct macb_dma_desc *desc;
unsigned char *p_recv;
struct sk_buff *skb;
unsigned int pktlen;
- desc = macb_rx_desc(lp, lp->rx_tail);
+ desc = macb_rx_desc(q, q->rx_tail);
while (desc->addr & MACB_BIT(RX_USED)) {
- p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
+ p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
skb = netdev_alloc_skb(dev, pktlen + 2);
if (skb) {
@@ -3163,12 +3619,12 @@ static void at91ether_rx(struct net_device *dev)
desc->addr &= ~MACB_BIT(RX_USED);
/* wrap after last buffer */
- if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
- lp->rx_tail = 0;
+ if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
+ q->rx_tail = 0;
else
- lp->rx_tail++;
+ q->rx_tail++;
- desc = macb_rx_desc(lp, lp->rx_tail);
+ desc = macb_rx_desc(q, q->rx_tail);
}
}
@@ -3394,7 +3850,6 @@ static int macb_probe(struct platform_device *pdev)
= macb_config->clk_init;
int (*init)(struct platform_device *) = macb_config->init;
struct device_node *np = pdev->dev.of_node;
- struct device_node *phy_node;
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
unsigned int queue_mask, num_queues;
struct macb_platform_data *pdata;
@@ -3500,18 +3955,6 @@ static int macb_probe(struct platform_device *pdev)
else
macb_get_hwaddr(bp);
- /* Power up the PHY if there is a GPIO reset */
- phy_node = of_get_next_available_child(np, NULL);
- if (phy_node) {
- int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
-
- if (gpio_is_valid(gpio)) {
- bp->reset_gpio = gpio_to_desc(gpio);
- gpiod_direction_output(bp->reset_gpio, 1);
- }
- }
- of_node_put(phy_node);
-
err = of_get_phy_mode(np);
if (err < 0) {
pdata = dev_get_platdata(&pdev->dev);
@@ -3542,6 +3985,9 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_unregister_mdio;
}
+ tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
+ (unsigned long)bp);
+
phy_attached_info(phydev);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
@@ -3558,10 +4004,6 @@ err_out_unregister_mdio:
of_phy_deregister_fixed_link(np);
mdiobus_free(bp->mii_bus);
- /* Shutdown the PHY if there is a GPIO reset */
- if (bp->reset_gpio)
- gpiod_set_value(bp->reset_gpio, 0);
-
err_out_free_netdev:
free_netdev(dev);
@@ -3592,10 +4034,6 @@ static int macb_remove(struct platform_device *pdev)
dev->phydev = NULL;
mdiobus_free(bp->mii_bus);
- /* Shutdown the PHY if there is a GPIO reset */
- if (bp->reset_gpio)
- gpiod_set_value(bp->reset_gpio, 0);
-
unregister_netdev(dev);
clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);