Diffstat (limited to 'drivers/net/ethernet/dlink/sundance.c')
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 103
1 file changed, 54 insertions(+), 49 deletions(-)
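A note on the dominant change below: struct netdev_desc previously described its single hardware fragment with a one-element array, frag[1], which the driver always addressed as frag[0]; the patch turns it into a plain frag member. A minimal standalone sketch (hypothetical struct names, uint32_t standing in for the kernel's __le32) showing that both layouts are 16 bytes, so the descriptor format the NIC sees is unchanged and only the C access syntax moves from frag[0].addr to frag.addr:

#include <stdio.h>
#include <stdint.h>

/* Old layout: one-element array idiom, accessed as frag[0]. */
struct desc_old {
        uint32_t next_desc;
        uint32_t status;
        struct { uint32_t addr, length; } frag[1];
};

/* New layout: plain member, since the hardware uses exactly one fragment. */
struct desc_new {
        uint32_t next_desc;
        uint32_t status;
        struct { uint32_t addr, length; } frag;
};

int main(void)
{
        /* Both print 16: the descriptor size, and therefore the DMA ring
         * layout, is identical; only the C-level syntax differs. */
        printf("old=%zu new=%zu\n",
               sizeof(struct desc_old), sizeof(struct desc_new));
        return 0;
}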
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index ee0ca712dd1c..277c50ef773f 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -340,7 +340,7 @@ enum wake_event_bits {
 struct netdev_desc {
         __le32 next_desc;
         __le32 status;
-        struct desc_frag { __le32 addr, length; } frag[1];
+        struct desc_frag { __le32 addr, length; } frag;
 };
 
 /* Bits in netdev_desc.status */
@@ -479,7 +479,7 @@ static const struct net_device_ops netdev_ops = {
         .ndo_start_xmit         = start_tx,
         .ndo_get_stats          = get_stats,
         .ndo_set_rx_mode        = set_rx_mode,
-        .ndo_do_ioctl           = netdev_ioctl,
+        .ndo_eth_ioctl          = netdev_ioctl,
         .ndo_tx_timeout         = tx_timeout,
         .ndo_change_mtu         = change_mtu,
         .ndo_set_mac_address    = sundance_set_mac_addr,
@@ -508,6 +508,7 @@ static int sundance_probe1(struct pci_dev *pdev,
         int bar = 1;
 #endif
         int phy, phy_end, phy_idx = 0;
+        __le16 addr[ETH_ALEN / 2];
 
         if (pci_enable_device(pdev))
                 return -EIO;
@@ -528,8 +529,9 @@ static int sundance_probe1(struct pci_dev *pdev,
                 goto err_out_res;
 
         for (i = 0; i < 3; i++)
-                ((__le16 *)dev->dev_addr)[i] =
+                addr[i] =
                         cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
+        eth_hw_addr_set(dev, (u8 *)addr);
 
         np = netdev_priv(dev);
         np->ndev = dev;
@@ -706,7 +708,7 @@ static int change_mtu(struct net_device *dev, int new_mtu)
 {
         if (netif_running(dev))
                 return -EBUSY;
-        dev->mtu = new_mtu;
+        WRITE_ONCE(dev->mtu, new_mtu);
         return 0;
 }
 
@@ -940,7 +942,7 @@ static void check_duplex(struct net_device *dev)
 
 static void netdev_timer(struct timer_list *t)
 {
-        struct netdev_private *np = from_timer(np, t, timer);
+        struct netdev_private *np = timer_container_of(np, t, timer);
         struct net_device *dev = np->mii_if.dev;
         void __iomem *ioaddr = np->base;
         int next_tick = 10*HZ;
@@ -978,8 +980,8 @@ static void tx_timeout(struct net_device *dev, unsigned int txqueue)
                         le32_to_cpu(np->tx_ring[i].next_desc),
                         le32_to_cpu(np->tx_ring[i].status),
                         (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
-                        le32_to_cpu(np->tx_ring[i].frag[0].addr),
-                        le32_to_cpu(np->tx_ring[i].frag[0].length));
+                        le32_to_cpu(np->tx_ring[i].frag.addr),
+                        le32_to_cpu(np->tx_ring[i].frag.length));
         }
         printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
                 ioread32(np->base + TxListPtr),
@@ -1025,28 +1027,29 @@ static void init_ring(struct net_device *dev)
                 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
                         ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
                 np->rx_ring[i].status = 0;
-                np->rx_ring[i].frag[0].length = 0;
+                np->rx_ring[i].frag.length = 0;
                 np->rx_skbuff[i] = NULL;
         }
 
         /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
         for (i = 0; i < RX_RING_SIZE; i++) {
+                dma_addr_t addr;
+
                 struct sk_buff *skb =
                         netdev_alloc_skb(dev, np->rx_buf_sz + 2);
                 np->rx_skbuff[i] = skb;
                 if (skb == NULL)
                         break;
                 skb_reserve(skb, 2);    /* 16 byte align the IP header. */
-                np->rx_ring[i].frag[0].addr = cpu_to_le32(
-                        dma_map_single(&np->pci_dev->dev, skb->data,
-                                np->rx_buf_sz, DMA_FROM_DEVICE));
-                if (dma_mapping_error(&np->pci_dev->dev,
-                                        np->rx_ring[i].frag[0].addr)) {
+                addr = dma_map_single(&np->pci_dev->dev, skb->data,
+                                      np->rx_buf_sz, DMA_FROM_DEVICE);
+                if (dma_mapping_error(&np->pci_dev->dev, addr)) {
                         dev_kfree_skb(skb);
                         np->rx_skbuff[i] = NULL;
                         break;
                 }
-                np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
+                np->rx_ring[i].frag.addr = cpu_to_le32(addr);
+                np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
         }
         np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 
@@ -1086,6 +1089,7 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
 {
         struct netdev_private *np = netdev_priv(dev);
         struct netdev_desc *txdesc;
+        dma_addr_t addr;
         unsigned entry;
 
         /* Calculate the next Tx descriptor entry. */
@@ -1093,14 +1097,15 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
         np->tx_skbuff[entry] = skb;
         txdesc = &np->tx_ring[entry];
 
+        addr = dma_map_single(&np->pci_dev->dev, skb->data, skb->len,
+                              DMA_TO_DEVICE);
+        if (dma_mapping_error(&np->pci_dev->dev, addr))
+                goto drop_frame;
+
         txdesc->next_desc = 0;
         txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
-        txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
-                        skb->data, skb->len, DMA_TO_DEVICE));
-        if (dma_mapping_error(&np->pci_dev->dev,
-                        txdesc->frag[0].addr))
-                goto drop_frame;
-        txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
+        txdesc->frag.addr = cpu_to_le32(addr);
+        txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag);
 
         /* Increment cur_tx before tasklet_schedule() */
         np->cur_tx++;
@@ -1149,7 +1154,7 @@ reset_tx (struct net_device *dev)
                 skb = np->tx_skbuff[i];
                 if (skb) {
                         dma_unmap_single(&np->pci_dev->dev,
-                                le32_to_cpu(np->tx_ring[i].frag[0].addr),
+                                le32_to_cpu(np->tx_ring[i].frag.addr),
                                 skb->len, DMA_TO_DEVICE);
                         dev_kfree_skb_any(skb);
                         np->tx_skbuff[i] = NULL;
@@ -1269,12 +1274,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
                                 skb = np->tx_skbuff[entry];
                                 /* Free the original skb. */
                                 dma_unmap_single(&np->pci_dev->dev,
-                                        le32_to_cpu(np->tx_ring[entry].frag[0].addr),
+                                        le32_to_cpu(np->tx_ring[entry].frag.addr),
                                         skb->len, DMA_TO_DEVICE);
                                 dev_consume_skb_irq(np->tx_skbuff[entry]);
                                 np->tx_skbuff[entry] = NULL;
-                                np->tx_ring[entry].frag[0].addr = 0;
-                                np->tx_ring[entry].frag[0].length = 0;
+                                np->tx_ring[entry].frag.addr = 0;
+                                np->tx_ring[entry].frag.length = 0;
                         }
                         spin_unlock(&np->lock);
                 } else {
@@ -1288,12 +1293,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
                         skb = np->tx_skbuff[entry];
                         /* Free the original skb. */
                         dma_unmap_single(&np->pci_dev->dev,
-                                le32_to_cpu(np->tx_ring[entry].frag[0].addr),
+                                le32_to_cpu(np->tx_ring[entry].frag.addr),
                                 skb->len, DMA_TO_DEVICE);
                         dev_consume_skb_irq(np->tx_skbuff[entry]);
                         np->tx_skbuff[entry] = NULL;
-                        np->tx_ring[entry].frag[0].addr = 0;
-                        np->tx_ring[entry].frag[0].length = 0;
+                        np->tx_ring[entry].frag.addr = 0;
+                        np->tx_ring[entry].frag.length = 0;
                 }
                 spin_unlock(&np->lock);
         }
@@ -1370,16 +1375,16 @@ static void rx_poll(struct tasklet_struct *t)
                             (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                                 skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                 dma_sync_single_for_cpu(&np->pci_dev->dev,
-                                                le32_to_cpu(desc->frag[0].addr),
+                                                le32_to_cpu(desc->frag.addr),
                                                 np->rx_buf_sz, DMA_FROM_DEVICE);
                                 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
                                 dma_sync_single_for_device(&np->pci_dev->dev,
-                                                le32_to_cpu(desc->frag[0].addr),
+                                                le32_to_cpu(desc->frag.addr),
                                                 np->rx_buf_sz, DMA_FROM_DEVICE);
                                 skb_put(skb, pkt_len);
                         } else {
                                 dma_unmap_single(&np->pci_dev->dev,
-                                        le32_to_cpu(desc->frag[0].addr),
+                                        le32_to_cpu(desc->frag.addr),
                                         np->rx_buf_sz, DMA_FROM_DEVICE);
                                 skb_put(skb = np->rx_skbuff[entry], pkt_len);
                                 np->rx_skbuff[entry] = NULL;
@@ -1412,12 +1417,13 @@ static void refill_rx (struct net_device *dev)
 {
         struct netdev_private *np = netdev_priv(dev);
         int entry;
-        int cnt = 0;
 
         /* Refill the Rx ring buffers. */
         for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
                 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
                 struct sk_buff *skb;
+                dma_addr_t addr;
+
                 entry = np->dirty_rx % RX_RING_SIZE;
                 if (np->rx_skbuff[entry] == NULL) {
                         skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
@@ -1425,21 +1431,20 @@ static void refill_rx (struct net_device *dev)
                         if (skb == NULL)
                                 break;          /* Better luck next round. */
                         skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
-                        np->rx_ring[entry].frag[0].addr = cpu_to_le32(
-                                dma_map_single(&np->pci_dev->dev, skb->data,
-                                        np->rx_buf_sz, DMA_FROM_DEVICE));
-                        if (dma_mapping_error(&np->pci_dev->dev,
-                                    np->rx_ring[entry].frag[0].addr)) {
+                        addr = dma_map_single(&np->pci_dev->dev, skb->data,
+                                              np->rx_buf_sz, DMA_FROM_DEVICE);
+                        if (dma_mapping_error(&np->pci_dev->dev, addr)) {
                                 dev_kfree_skb_irq(skb);
                                 np->rx_skbuff[entry] = NULL;
                                 break;
                         }
+
+                        np->rx_ring[entry].frag.addr = cpu_to_le32(addr);
                 }
                 /* Perhaps we need not reset this field. */
-                np->rx_ring[entry].frag[0].length =
+                np->rx_ring[entry].frag.length =
                         cpu_to_le32(np->rx_buf_sz | LastFrag);
                 np->rx_ring[entry].status = 0;
-                cnt++;
         }
 }
 static void netdev_error(struct net_device *dev, int intr_status)
@@ -1611,7 +1616,7 @@ static int sundance_set_mac_addr(struct net_device *dev, void *data)
 
         if (!is_valid_ether_addr(addr->sa_data))
                 return -EADDRNOTAVAIL;
-        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+        eth_hw_addr_set(dev, addr->sa_data);
         __set_mac_addr(dev);
 
         return 0;
@@ -1642,8 +1647,8 @@ static int check_if_running(struct net_device *dev)
 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
         struct netdev_private *np = netdev_priv(dev);
-        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
-        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+        strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+        strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int get_link_ksettings(struct net_device *dev,
@@ -1868,21 +1873,21 @@ static int netdev_close(struct net_device *dev)
                            (int)(np->tx_ring_dma));
                 for (i = 0; i < TX_RING_SIZE; i++)
                         printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
-                                   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
-                                   np->tx_ring[i].frag[0].length);
+                                   i, np->tx_ring[i].status, np->tx_ring[i].frag.addr,
+                                   np->tx_ring[i].frag.length);
                 printk(KERN_DEBUG "  Rx ring %8.8x:\n",
                            (int)(np->rx_ring_dma));
                 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
                         printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
-                                   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
-                                   np->rx_ring[i].frag[0].length);
+                                   i, np->rx_ring[i].status, np->rx_ring[i].frag.addr,
+                                   np->rx_ring[i].frag.length);
                 }
         }
 #endif /* __i386__ debugging only */
 
         free_irq(np->pci_dev->irq, dev);
 
-        del_timer_sync(&np->timer);
+        timer_delete_sync(&np->timer);
 
         /* Free all the skbuffs in the Rx queue. */
         for (i = 0; i < RX_RING_SIZE; i++) {
@@ -1890,19 +1895,19 @@ static int netdev_close(struct net_device *dev)
                 skb = np->rx_skbuff[i];
                 if (skb) {
                         dma_unmap_single(&np->pci_dev->dev,
-                                le32_to_cpu(np->rx_ring[i].frag[0].addr),
+                                le32_to_cpu(np->rx_ring[i].frag.addr),
                                 np->rx_buf_sz, DMA_FROM_DEVICE);
                         dev_kfree_skb(skb);
                         np->rx_skbuff[i] = NULL;
                 }
-                np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
+                np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */
         }
         for (i = 0; i < TX_RING_SIZE; i++) {
                 np->tx_ring[i].next_desc = 0;
                 skb = np->tx_skbuff[i];
                 if (skb) {
                         dma_unmap_single(&np->pci_dev->dev,
-                                le32_to_cpu(np->tx_ring[i].frag[0].addr),
+                                le32_to_cpu(np->tx_ring[i].frag.addr),
                                 skb->len, DMA_TO_DEVICE);
                         dev_kfree_skb(skb);
                         np->tx_skbuff[i] = NULL;
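The DMA hunks in init_ring(), start_tx() and refill_rx() above all converge on one ordering: map into a local dma_addr_t, test dma_mapping_error() on that handle, and only then store the cpu_to_le32() result in the descriptor. The old code round-tripped the address through the __le32 frag[0].addr field before checking it, so dma_mapping_error() could see a truncated or byte-swapped value. A condensed kernel-style sketch of the new ordering; sundance_map_rx_buf is a hypothetical helper, not part of the driver, and this fragment only compiles inside the kernel tree:

static int sundance_map_rx_buf(struct netdev_private *np,
                               struct netdev_desc *desc,
                               struct sk_buff *skb)
{
        dma_addr_t addr;

        /* Map first, into a full-width dma_addr_t... */
        addr = dma_map_single(&np->pci_dev->dev, skb->data,
                              np->rx_buf_sz, DMA_FROM_DEVICE);
        /* ...check the unmodified handle... */
        if (dma_mapping_error(&np->pci_dev->dev, addr))
                return -ENOMEM;
        /* ...and only then publish the little-endian value to the NIC. */
        desc->frag.addr = cpu_to_le32(addr);
        desc->frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
        return 0;
}

On mapping failure the real callers free the skb, clear the ring slot and break out of the fill loop, leaving the slot for a later refill pass, exactly as the hunks above do.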
