Diffstat (limited to 'drivers/net/ethernet/natsemi/sonic.c')
-rw-r--r--   drivers/net/ethernet/natsemi/sonic.c   542
1 file changed, 328 insertions(+), 214 deletions(-)
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index 612c7a44b26c..825356ee3492 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * sonic.c * @@ -33,7 +34,57 @@ * the NetBSD file "sys/arch/mac68k/dev/if_sn.c". */ +static unsigned int version_printed; +static int sonic_debug = -1; +module_param(sonic_debug, int, 0); +MODULE_PARM_DESC(sonic_debug, "debug message level"); + +static void sonic_msg_init(struct net_device *dev) +{ + struct sonic_local *lp = netdev_priv(dev); + + lp->msg_enable = netif_msg_init(sonic_debug, 0); + + if (version_printed++ == 0) + netif_dbg(lp, drv, dev, "%s", version); +} + +static int sonic_alloc_descriptors(struct net_device *dev) +{ + struct sonic_local *lp = netdev_priv(dev); + + /* Allocate a chunk of memory for the descriptors. Note that this + * must not cross a 64K boundary. It is smaller than one page which + * means that page alignment is a sufficient condition. + */ + lp->descriptors = + dma_alloc_coherent(lp->device, + SIZEOF_SONIC_DESC * + SONIC_BUS_SCALE(lp->dma_bitmode), + &lp->descriptors_laddr, GFP_KERNEL); + + if (!lp->descriptors) + return -ENOMEM; + + lp->cda = lp->descriptors; + lp->tda = lp->cda + SIZEOF_SONIC_CDA * + SONIC_BUS_SCALE(lp->dma_bitmode); + lp->rda = lp->tda + SIZEOF_SONIC_TD * SONIC_NUM_TDS * + SONIC_BUS_SCALE(lp->dma_bitmode); + lp->rra = lp->rda + SIZEOF_SONIC_RD * SONIC_NUM_RDS * + SONIC_BUS_SCALE(lp->dma_bitmode); + + lp->cda_laddr = lp->descriptors_laddr; + lp->tda_laddr = lp->cda_laddr + SIZEOF_SONIC_CDA * + SONIC_BUS_SCALE(lp->dma_bitmode); + lp->rda_laddr = lp->tda_laddr + SIZEOF_SONIC_TD * SONIC_NUM_TDS * + SONIC_BUS_SCALE(lp->dma_bitmode); + lp->rra_laddr = lp->rda_laddr + SIZEOF_SONIC_RD * SONIC_NUM_RDS * + SONIC_BUS_SCALE(lp->dma_bitmode); + + return 0; +} /* * Open/initialize the SONIC controller. @@ -47,8 +98,9 @@ static int sonic_open(struct net_device *dev) struct sonic_local *lp = netdev_priv(dev); int i; - if (sonic_debug > 2) - printk("sonic_open: initializing sonic driver.\n"); + netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__); + + spin_lock_init(&lp->lock); for (i = 0; i < SONIC_NUM_RRS; i++) { struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); @@ -71,7 +123,7 @@ static int sonic_open(struct net_device *dev) for (i = 0; i < SONIC_NUM_RRS; i++) { dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE), SONIC_RBSIZE, DMA_FROM_DEVICE); - if (!laddr) { + if (dma_mapping_error(lp->device, laddr)) { while(i > 0) { /* free any that were mapped successfully */ i--; dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE); @@ -91,16 +143,33 @@ static int sonic_open(struct net_device *dev) /* * Initialize the SONIC */ - sonic_init(dev); + sonic_init(dev, true); netif_start_queue(dev); - if (sonic_debug > 2) - printk("sonic_open: Initialization done.\n"); + netif_dbg(lp, ifup, dev, "%s: Initialization done\n", __func__); return 0; } +/* Wait for the SONIC to become idle. */ +static void sonic_quiesce(struct net_device *dev, u16 mask, bool may_sleep) +{ + struct sonic_local * __maybe_unused lp = netdev_priv(dev); + int i; + u16 bits; + + for (i = 0; i < 1000; ++i) { + bits = SONIC_READ(SONIC_CMD) & mask; + if (!bits) + return; + if (!may_sleep) + udelay(20); + else + usleep_range(100, 200); + } + WARN_ONCE(1, "command deadline expired! 
0x%04x\n", bits); +} /* * Close the SONIC device @@ -110,14 +179,16 @@ static int sonic_close(struct net_device *dev) struct sonic_local *lp = netdev_priv(dev); int i; - if (sonic_debug > 2) - printk("sonic_close\n"); + netif_dbg(lp, ifdown, dev, "%s\n", __func__); netif_stop_queue(dev); /* * stop the SONIC, disable interrupts */ + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); + sonic_quiesce(dev, SONIC_CR_ALL, true); + SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); @@ -149,7 +220,7 @@ static int sonic_close(struct net_device *dev) return 0; } -static void sonic_tx_timeout(struct net_device *dev) +static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct sonic_local *lp = netdev_priv(dev); int i; @@ -157,6 +228,9 @@ static void sonic_tx_timeout(struct net_device *dev) * put the Sonic into software-reset mode and * disable all interrupts before releasing DMA buffers */ + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); + sonic_quiesce(dev, SONIC_CR_ALL, false); + SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); @@ -172,7 +246,7 @@ static void sonic_tx_timeout(struct net_device *dev) } } /* Try to restart the adaptor. */ - sonic_init(dev); + sonic_init(dev, false); lp->stats.tx_errors++; netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); @@ -194,8 +268,6 @@ static void sonic_tx_timeout(struct net_device *dev) * wake the tx queue * Concurrently with all of this, the SONIC is potentially writing to * the status flags of the TDs. - * Until some mutual exclusion is added, this code will not work with SMP. However, - * MIPS Jazz machines and m68k Macs were all uni-processor machines. */ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) @@ -203,10 +275,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) struct sonic_local *lp = netdev_priv(dev); dma_addr_t laddr; int length; - int entry = lp->next_tx; + int entry; + unsigned long flags; - if (sonic_debug > 2) - printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev); + netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb); length = skb->len; if (length < ETH_ZLEN) { @@ -220,12 +292,16 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) */ laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE); - if (!laddr) { - printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name); - dev_kfree_skb(skb); - return NETDEV_TX_BUSY; + if (dma_mapping_error(lp->device, laddr)) { + pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; } + spin_lock_irqsave(&lp->lock, flags); + + entry = (lp->eol_tx + 1) & SONIC_TDS_MASK; + sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */ sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */ sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */ @@ -235,33 +311,28 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) sonic_tda_put(dev, entry, SONIC_TD_LINK, sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL); - /* - * Must set tx_skb[entry] only after clearing status, and - * before clearing EOL and before stopping queue - */ - wmb(); + sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK, ~SONIC_EOL & + sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK)); + + netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__); + + SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); + 
lp->tx_len[entry] = length; lp->tx_laddr[entry] = laddr; lp->tx_skb[entry] = skb; - wmb(); - sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK, - sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL); lp->eol_tx = entry; - lp->next_tx = (entry + 1) & SONIC_TDS_MASK; - if (lp->tx_skb[lp->next_tx] != NULL) { + entry = (entry + 1) & SONIC_TDS_MASK; + if (lp->tx_skb[entry]) { /* The ring is full, the ISR has yet to process the next TD. */ - if (sonic_debug > 3) - printk("%s: stopping queue\n", dev->name); + netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__); netif_stop_queue(dev); /* after this packet, wait for ISR to free up some TDAs */ - } else netif_start_queue(dev); + } - if (sonic_debug > 2) - printk("sonic_send_packet: issuing Tx command\n"); - - SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); + spin_unlock_irqrestore(&lp->lock, flags); return NETDEV_TX_OK; } @@ -275,16 +346,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) struct net_device *dev = dev_id; struct sonic_local *lp = netdev_priv(dev); int status; + unsigned long flags; + + /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt() + * with sonic_send_packet() so that the two functions can share state. + * Secondly, it makes sonic_interrupt() re-entrant, as that is required + * by macsonic which must use two IRQs with different priority levels. + */ + spin_lock_irqsave(&lp->lock, flags); + + status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT; + if (!status) { + spin_unlock_irqrestore(&lp->lock, flags); - if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT)) return IRQ_NONE; + } do { + SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */ + if (status & SONIC_INT_PKTRX) { - if (sonic_debug > 2) - printk("%s: packet rx\n", dev->name); + netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__); sonic_rx(dev); /* got packet(s) */ - SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */ } if (status & SONIC_INT_TXDN) { @@ -292,37 +375,38 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) int td_status; int freed_some = 0; - /* At this point, cur_tx is the index of a TD that is one of: - * unallocated/freed (status set & tx_skb[entry] clear) - * allocated and sent (status set & tx_skb[entry] set ) - * allocated and not yet sent (status clear & tx_skb[entry] set ) - * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear) + /* The state of a Transmit Descriptor may be inferred + * from { tx_skb[entry], td_status } as follows. 
+ * { clear, clear } => the TD has never been used + * { set, clear } => the TD was handed to SONIC + * { set, set } => the TD was handed back + * { clear, set } => the TD is available for re-use */ - if (sonic_debug > 2) - printk("%s: tx done\n", dev->name); + netif_dbg(lp, intr, dev, "%s: tx done\n", __func__); while (lp->tx_skb[entry] != NULL) { if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0) break; - if (td_status & 0x0001) { + if (td_status & SONIC_TCR_PTX) { lp->stats.tx_packets++; lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE); } else { - lp->stats.tx_errors++; - if (td_status & 0x0642) + if (td_status & (SONIC_TCR_EXD | + SONIC_TCR_EXC | SONIC_TCR_BCM)) lp->stats.tx_aborted_errors++; - if (td_status & 0x0180) + if (td_status & + (SONIC_TCR_NCRS | SONIC_TCR_CRLS)) lp->stats.tx_carrier_errors++; - if (td_status & 0x0020) + if (td_status & SONIC_TCR_OWC) lp->stats.tx_window_errors++; - if (td_status & 0x0004) + if (td_status & SONIC_TCR_FU) lp->stats.tx_fifo_errors++; } /* We must free the original skb */ - dev_kfree_skb_irq(lp->tx_skb[entry]); + dev_consume_skb_irq(lp->tx_skb[entry]); lp->tx_skb[entry] = NULL; /* and unmap DMA buffer */ dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE); @@ -339,50 +423,45 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) if (freed_some || lp->tx_skb[entry] == NULL) netif_wake_queue(dev); /* The ring is no longer full */ lp->cur_tx = entry; - SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */ } /* * check error conditions */ if (status & SONIC_INT_RFO) { - if (sonic_debug > 1) - printk("%s: rx fifo overrun\n", dev->name); - lp->stats.rx_fifo_errors++; - SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */ + netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n", + __func__); } if (status & SONIC_INT_RDE) { - if (sonic_debug > 1) - printk("%s: rx descriptors exhausted\n", dev->name); - lp->stats.rx_dropped++; - SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */ + netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n", + __func__); } if (status & SONIC_INT_RBAE) { - if (sonic_debug > 1) - printk("%s: rx buffer area exceeded\n", dev->name); - lp->stats.rx_dropped++; - SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */ + netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n", + __func__); } /* counter overruns; all counters are 16bit wide */ - if (status & SONIC_INT_FAE) { + if (status & SONIC_INT_FAE) lp->stats.rx_frame_errors += 65536; - SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */ - } - if (status & SONIC_INT_CRC) { + if (status & SONIC_INT_CRC) lp->stats.rx_crc_errors += 65536; - SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */ - } - if (status & SONIC_INT_MP) { + if (status & SONIC_INT_MP) lp->stats.rx_missed_errors += 65536; - SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */ - } /* transmit error */ if (status & SONIC_INT_TXER) { - if ((SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) && (sonic_debug > 2)) - printk(KERN_ERR "%s: tx fifo underrun\n", dev->name); - SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */ + u16 tcr = SONIC_READ(SONIC_TCR); + + netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n", + __func__, tcr); + + if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC | + SONIC_TCR_FU | SONIC_TCR_BCM)) { + /* Aborted transmission. Try again. 
*/ + netif_stop_queue(dev); + SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); + } } /* bus retry */ @@ -392,112 +471,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) /* ... to help debug DMA problems causing endless interrupts. */ /* Bounce the eth interface to turn on the interrupt again. */ SONIC_WRITE(SONIC_IMR, 0); - SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */ } - /* load CAM done */ - if (status & SONIC_INT_LCD) - SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */ - } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT)); + status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT; + } while (status); + + spin_unlock_irqrestore(&lp->lock, flags); + return IRQ_HANDLED; } +/* Return the array index corresponding to a given Receive Buffer pointer. */ +static int index_from_addr(struct sonic_local *lp, dma_addr_t addr, + unsigned int last) +{ + unsigned int i = last; + + do { + i = (i + 1) & SONIC_RRS_MASK; + if (addr == lp->rx_laddr[i]) + return i; + } while (i != last); + + return -ENOENT; +} + +/* Allocate and map a new skb to be used as a receive buffer. */ +static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp, + struct sk_buff **new_skb, dma_addr_t *new_addr) +{ + *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); + if (!*new_skb) + return false; + + if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2) + skb_reserve(*new_skb, 2); + + *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE), + SONIC_RBSIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(lp->device, *new_addr)) { + dev_kfree_skb(*new_skb); + *new_skb = NULL; + return false; + } + + return true; +} + +/* Place a new receive resource in the Receive Resource Area and update RWP. */ +static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp, + dma_addr_t old_addr, dma_addr_t new_addr) +{ + unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP)); + unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP)); + u32 buf; + + /* The resources in the range [RRP, RWP) belong to the SONIC. This loop + * scans the other resources in the RRA, those in the range [RWP, RRP). + */ + do { + buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) | + sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L); + + if (buf == old_addr) + break; + + entry = (entry + 1) & SONIC_RRS_MASK; + } while (entry != end); + + WARN_ONCE(buf != old_addr, "failed to find resource!\n"); + + sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16); + sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff); + + entry = (entry + 1) & SONIC_RRS_MASK; + + SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry)); +} + /* * We have a good packet(s), pass it/them up the network stack. */ static void sonic_rx(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); - int status; int entry = lp->cur_rx; + int prev_entry = lp->eol_rx; + bool rbe = false; while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) { - struct sk_buff *used_skb; - struct sk_buff *new_skb; - dma_addr_t new_laddr; - u16 bufadr_l; - u16 bufadr_h; - int pkt_len; - - status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); - if (status & SONIC_RCR_PRX) { - /* Malloc up new buffer. 
*/ - new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); - if (new_skb == NULL) { - lp->stats.rx_dropped++; + u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); + + /* If the RD has LPKT set, the chip has finished with the RB */ + if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) { + struct sk_buff *new_skb; + dma_addr_t new_laddr; + u32 addr = (sonic_rda_get(dev, entry, + SONIC_RD_PKTPTR_H) << 16) | + sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L); + int i = index_from_addr(lp, addr, entry); + + if (i < 0) { + WARN_ONCE(1, "failed to find buffer!\n"); break; } - /* provide 16 byte IP header alignment unless DMA requires otherwise */ - if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2) - skb_reserve(new_skb, 2); - - new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE), - SONIC_RBSIZE, DMA_FROM_DEVICE); - if (!new_laddr) { - dev_kfree_skb(new_skb); - printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name); + + if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) { + struct sk_buff *used_skb = lp->rx_skb[i]; + int pkt_len; + + /* Pass the used buffer up the stack */ + dma_unmap_single(lp->device, addr, SONIC_RBSIZE, + DMA_FROM_DEVICE); + + pkt_len = sonic_rda_get(dev, entry, + SONIC_RD_PKTLEN); + skb_trim(used_skb, pkt_len); + used_skb->protocol = eth_type_trans(used_skb, + dev); + netif_rx(used_skb); + lp->stats.rx_packets++; + lp->stats.rx_bytes += pkt_len; + + lp->rx_skb[i] = new_skb; + lp->rx_laddr[i] = new_laddr; + } else { + /* Failed to obtain a new buffer so re-use it */ + new_laddr = addr; lp->stats.rx_dropped++; - break; } - - /* now we have a new skb to replace it, pass the used one up the stack */ - dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE); - used_skb = lp->rx_skb[entry]; - pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN); - skb_trim(used_skb, pkt_len); - used_skb->protocol = eth_type_trans(used_skb, dev); - netif_rx(used_skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; - - /* and insert the new skb */ - lp->rx_laddr[entry] = new_laddr; - lp->rx_skb[entry] = new_skb; - - bufadr_l = (unsigned long)new_laddr & 0xffff; - bufadr_h = (unsigned long)new_laddr >> 16; - sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l); - sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h); - } else { - /* This should only happen, if we enable accepting broken packets. */ - lp->stats.rx_errors++; - if (status & SONIC_RCR_FAER) - lp->stats.rx_frame_errors++; - if (status & SONIC_RCR_CRCR) - lp->stats.rx_crc_errors++; - } - if (status & SONIC_RCR_LPKT) { - /* - * this was the last packet out of the current receive buffer - * give the buffer back to the SONIC + /* If RBE is already asserted when RWP advances then + * it's safe to clear RBE after processing this packet. */ - lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode); - if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff; - SONIC_WRITE(SONIC_RWP, lp->cur_rwp); - if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) { - if (sonic_debug > 2) - printk("%s: rx buffer exhausted\n", dev->name); - SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */ - } - } else - printk(KERN_ERR "%s: rx desc without RCR_LPKT. 
Shouldn't happen !?\n", - dev->name); + rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE; + sonic_update_rra(dev, lp, addr, new_laddr); + } /* * give back the descriptor */ - sonic_rda_put(dev, entry, SONIC_RD_LINK, - sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL); + sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0); sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1); - sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, - sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL); - lp->eol_rx = entry; - lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK; + + prev_entry = entry; + entry = (entry + 1) & SONIC_RDS_MASK; } - /* - * If any worth-while packets have been received, netif_rx() - * has done a mark_bh(NET_BH) for us and will work on them - * when we get to the bottom-half routine. - */ + + lp->cur_rx = entry; + + if (prev_entry != lp->eol_rx) { + /* Advance the EOL flag to put descriptors back into service */ + sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL | + sonic_rda_get(dev, prev_entry, SONIC_RD_LINK)); + sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL & + sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK)); + lp->eol_rx = prev_entry; + } + + if (rbe) + SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); } @@ -542,9 +673,10 @@ static void sonic_multicast_list(struct net_device *dev) (netdev_mc_count(dev) > 15)) { rcr |= SONIC_RCR_AMC; } else { - if (sonic_debug > 2) - printk("sonic_multicast_list: mc_count %d\n", - netdev_mc_count(dev)); + unsigned long flags; + + netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__, + netdev_mc_count(dev)); sonic_set_cam_enable(dev, 1); /* always enable our own address */ i = 1; netdev_for_each_mc_addr(ha, dev) { @@ -556,14 +688,18 @@ static void sonic_multicast_list(struct net_device *dev) i++; } SONIC_WRITE(SONIC_CDC, 16); - /* issue Load CAM command */ SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff); + + /* LCAM and TXP commands can't be used simultaneously */ + spin_lock_irqsave(&lp->lock, flags); + sonic_quiesce(dev, SONIC_CR_TXP, false); SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); + sonic_quiesce(dev, SONIC_CR_LCAM, false); + spin_unlock_irqrestore(&lp->lock, flags); } } - if (sonic_debug > 2) - printk("sonic_multicast_list: setting RCR=%x\n", rcr); + netif_dbg(lp, ifup, dev, "%s: setting RCR=%x\n", __func__, rcr); SONIC_WRITE(SONIC_RCR, rcr); } @@ -572,9 +708,8 @@ static void sonic_multicast_list(struct net_device *dev) /* * Initialize the SONIC ethernet controller. 
*/ -static int sonic_init(struct net_device *dev) +static int sonic_init(struct net_device *dev, bool may_sleep) { - unsigned int cmd; struct sonic_local *lp = netdev_priv(dev); int i; @@ -586,18 +721,22 @@ static int sonic_init(struct net_device *dev) SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); + /* While in reset mode, clear CAM Enable register */ + SONIC_WRITE(SONIC_CE, 0); + /* * clear software reset flag, disable receiver, clear and * enable interrupts, then completely initialize the SONIC */ SONIC_WRITE(SONIC_CMD, 0); - SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP); + sonic_quiesce(dev, SONIC_CR_ALL, may_sleep); /* * initialize the receive resource area */ - if (sonic_debug > 2) - printk("sonic_init: initialize receive resource area\n"); + netif_dbg(lp, ifup, dev, "%s: initialize receive resource area\n", + __func__); for (i = 0; i < SONIC_NUM_RRS; i++) { u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff; @@ -609,39 +748,27 @@ static int sonic_init(struct net_device *dev) } /* initialize all RRA registers */ - lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR * - SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff; - lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR * - SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff; - - SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff); - SONIC_WRITE(SONIC_REA, lp->rra_end); - SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff); - SONIC_WRITE(SONIC_RWP, lp->cur_rwp); + SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0)); + SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS)); + SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0)); + SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1)); SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16); SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1)); /* load the resource pointers */ - if (sonic_debug > 3) - printk("sonic_init: issuing RRRA command\n"); + netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__); SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA); - i = 0; - while (i++ < 100) { - if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA) - break; - } - - if (sonic_debug > 2) - printk("sonic_init: status=%x i=%d\n", SONIC_READ(SONIC_CMD), i); + sonic_quiesce(dev, SONIC_CR_RRRA, may_sleep); /* * Initialize the receive descriptors so that they * become a circular linked list, ie. let the last * descriptor point to the first again. 
*/ - if (sonic_debug > 2) - printk("sonic_init: initialize receive descriptors\n"); + netif_dbg(lp, ifup, dev, "%s: initialize receive descriptors\n", + __func__); + for (i=0; i<SONIC_NUM_RDS; i++) { sonic_rda_put(dev, i, SONIC_RD_STATUS, 0); sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0); @@ -664,8 +791,9 @@ static int sonic_init(struct net_device *dev) /* * initialize transmit descriptors */ - if (sonic_debug > 2) - printk("sonic_init: initialize transmit descriptors\n"); + netif_dbg(lp, ifup, dev, "%s: initialize transmit descriptors\n", + __func__); + for (i = 0; i < SONIC_NUM_TDS; i++) { sonic_tda_put(dev, i, SONIC_TD_STATUS, 0); sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0); @@ -682,7 +810,7 @@ static int sonic_init(struct net_device *dev) SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16); SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff); - lp->cur_tx = lp->next_tx = 0; + lp->cur_tx = 0; lp->eol_tx = SONIC_NUM_TDS - 1; /* @@ -706,34 +834,20 @@ static int sonic_init(struct net_device *dev) * load the CAM */ SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); - - i = 0; - while (i++ < 100) { - if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD) - break; - } - if (sonic_debug > 2) { - printk("sonic_init: CMD=%x, ISR=%x\n, i=%d", - SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i); - } + sonic_quiesce(dev, SONIC_CR_LCAM, may_sleep); /* * enable receiver, disable loopback * and enable all interrupts */ - SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP); SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT); SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT); + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN); - cmd = SONIC_READ(SONIC_CMD); - if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0) - printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd); - - if (sonic_debug > 2) - printk("sonic_init: new status=%x\n", - SONIC_READ(SONIC_CMD)); + netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__, + SONIC_READ(SONIC_CMD)); return 0; } |
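
The new sonic_alloc_descriptors() above makes a single dma_alloc_coherent() call and carves the chunk into the CAM, transmit, receive and resource areas, advancing the CPU pointers and the bus addresses in step. Below is a minimal sketch of that carving under assumed area sizes; struct desc_area, the *_BYTES constants and alloc_desc_area() are illustrative names, only dma_alloc_coherent() is the real API.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative sizes; the driver derives the real ones from SIZEOF_SONIC_*
 * and SONIC_BUS_SCALE(dma_bitmode).
 */
#define CDA_BYTES	0x100
#define TDA_BYTES	0x200
#define RDA_BYTES	0x200

struct desc_area {
	char *base, *cda, *tda, *rda, *rra;
	dma_addr_t base_laddr, cda_laddr, tda_laddr, rda_laddr, rra_laddr;
};

static int alloc_desc_area(struct device *dev, struct desc_area *d,
			   size_t total)
{
	/* One allocation smaller than a page is page aligned, so it cannot
	 * cross a 64K boundary; the sub-areas are then laid out back to back.
	 */
	d->base = dma_alloc_coherent(dev, total, &d->base_laddr, GFP_KERNEL);
	if (!d->base)
		return -ENOMEM;

	d->cda = d->base;            d->cda_laddr = d->base_laddr;
	d->tda = d->cda + CDA_BYTES; d->tda_laddr = d->cda_laddr + CDA_BYTES;
	d->rda = d->tda + TDA_BYTES; d->rda_laddr = d->tda_laddr + TDA_BYTES;
	d->rra = d->rda + RDA_BYTES; d->rra_laddr = d->rda_laddr + RDA_BYTES;

	return 0;
}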
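
Several mapping checks in the patch move from if (!laddr) to dma_mapping_error(), because a DMA handle of zero can be a perfectly valid bus address on some platforms and failures must be detected through the dedicated helper. A small illustration of the idiom follows; map_tx_buffer() is a hypothetical wrapper, not a driver function.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_tx_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* A zero handle may be valid, so test the mapping with
	 * dma_mapping_error() rather than with a NULL check.
	 */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}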
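
sonic_quiesce() waits for the controller to finish a command by polling the command register until the requested bits clear, busy-waiting with udelay() when the caller may not sleep and using usleep_range() otherwise. The sketch below reproduces that polling pattern; my_read_cmd() is a stand-in for the driver's SONIC_READ(SONIC_CMD).

#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/types.h>

/* Hypothetical register accessor standing in for SONIC_READ(SONIC_CMD). */
u16 my_read_cmd(void);

static void poll_cmd_idle(u16 mask, bool may_sleep)
{
	u16 bits = 0;
	int i;

	for (i = 0; i < 1000; i++) {
		bits = my_read_cmd() & mask;
		if (!bits)
			return;			/* command completed */
		if (may_sleep)
			usleep_range(100, 200);	/* let the scheduler run */
		else
			udelay(20);		/* atomic context: busy-wait */
	}
	WARN_ONCE(1, "command did not complete, bits 0x%04x\n", bits);
}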
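
In the reworked transmit path the next descriptor index is derived from the current end-of-list entry, and the queue is stopped when the slot after the one just queued still holds an skb that the interrupt handler has not reclaimed. A condensed, self-contained sketch of that ring bookkeeping is below; NUM_TDS, TDS_MASK and struct tx_ring are illustrative names rather than the driver's own.

#include <stdbool.h>
#include <stddef.h>

#define NUM_TDS		16		/* must be a power of two */
#define TDS_MASK	(NUM_TDS - 1)

struct tx_ring {
	void *skb[NUM_TDS];		/* non-NULL while a slot is in flight */
	unsigned int eol;		/* index of the current end-of-list TD */
};

/* Queue one packet; returns true if the caller must stop the queue. */
static bool tx_ring_queue(struct tx_ring *r, void *skb)
{
	unsigned int entry = (r->eol + 1) & TDS_MASK;

	/* In the driver this is also where the descriptor is filled in and
	 * the EOL bit moves from the old tail to the new one.
	 */
	r->skb[entry] = skb;
	r->eol = entry;

	/* If the following slot has not been reclaimed yet the ring is full;
	 * stop the queue until the ISR frees a TD and wakes it again.
	 */
	entry = (entry + 1) & TDS_MASK;
	return r->skb[entry] != NULL;
}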
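
The interrupt handler now acknowledges the whole status word before servicing it and then re-reads ISR, looping until no enabled event remains, instead of acking each event bit individually after handling it. A skeleton of that loop is sketched here; read_isr(), write_isr() and the EV_* bits are placeholders for SONIC_READ/SONIC_WRITE and the SONIC_INT_* masks.

/* Placeholder register accessors standing in for SONIC_READ/SONIC_WRITE. */
unsigned int read_isr(void);
void write_isr(unsigned int val);

#define EV_RX	0x0001
#define EV_TX	0x0002
#define EV_MASK	(EV_RX | EV_TX)		/* only the events we enabled */

/* Returns nonzero if the device raised the interrupt, zero otherwise. */
static int handle_events(void)
{
	unsigned int status = read_isr() & EV_MASK;

	if (!status)
		return 0;		/* not ours: the IRQ line is shared */

	do {
		write_isr(status);	/* ack what is about to be handled */

		if (status & EV_RX) {
			/* process the receive ring */
		}
		if (status & EV_TX) {
			/* reap completed transmit descriptors */
		}

		status = read_isr() & EV_MASK;
	} while (status);

	return 1;
}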
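
index_from_addr() resolves a completed receive buffer back to its table slot by scanning the address table circularly, starting just after the slot that matched last time so that in-order completions hit on the first probe. A self-contained version of that scan, using a fictitious 16-entry table, is shown below.

#define NUM_RRS		16		/* must be a power of two */
#define RRS_MASK	(NUM_RRS - 1)

/* Return the slot whose recorded bus address equals addr, or -1 if the
 * address is not in the table. The scan begins at last + 1 and wraps.
 */
static int find_rx_index(const unsigned long *laddr_tab,
			 unsigned long addr, unsigned int last)
{
	unsigned int i = last;

	do {
		i = (i + 1) & RRS_MASK;
		if (laddr_tab[i] == addr)
			return i;
	} while (i != last);

	return -1;
}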
