Diffstat (limited to 'drivers/net/ethernet/packetengines/yellowfin.c')
-rw-r--r--  drivers/net/ethernet/packetengines/yellowfin.c | 130
1 file changed, 72 insertions, 58 deletions
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index fa7770da6ef8..1e25ac13a7d8 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -102,7 +102,7 @@ static int gx_fix;
 #include <linux/bitops.h>
 #include <linux/uaccess.h>
 #include <asm/processor.h>      /* Processor type for cache alignment. */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <asm/io.h>
 
 /* These identify the driver base version and may not be removed. */
@@ -191,7 +191,7 @@ IV. Notes
 
 Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
 Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
-and an AlphaStation to verifty the Alpha port!
+and an AlphaStation to verify the Alpha port!
 
 IVb. References
 
@@ -343,8 +343,8 @@ static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
 static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static int yellowfin_open(struct net_device *dev);
-static void yellowfin_timer(unsigned long data);
-static void yellowfin_tx_timeout(struct net_device *dev);
+static void yellowfin_timer(struct timer_list *t);
+static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue);
 static int yellowfin_init_ring(struct net_device *dev);
 static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
                                         struct net_device *dev);
@@ -362,7 +362,7 @@ static const struct net_device_ops netdev_ops = {
         .ndo_set_rx_mode     = set_rx_mode,
         .ndo_validate_addr   = eth_validate_addr,
         .ndo_set_mac_address = eth_mac_addr,
-        .ndo_do_ioctl        = netdev_ioctl,
+        .ndo_eth_ioctl       = netdev_ioctl,
         .ndo_tx_timeout      = yellowfin_tx_timeout,
 };
 
@@ -384,6 +384,7 @@ static int yellowfin_init_one(struct pci_dev *pdev,
 #else
         int bar = 1;
 #endif
+        u8 addr[ETH_ALEN];
 
         /* when built into the kernel, we only print version if device is found */
 #ifndef MODULE
@@ -416,12 +417,13 @@ static int yellowfin_init_one(struct pci_dev *pdev,
 
         if (drv_flags & DontUseEeprom)
                 for (i = 0; i < 6; i++)
-                        dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
+                        addr[i] = ioread8(ioaddr + StnAddr + i);
         else {
                 int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
                 for (i = 0; i < 6; i++)
-                        dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
+                        addr[i] = read_eeprom(ioaddr, ee_offset + i);
         }
+        eth_hw_addr_set(dev, addr);
 
         /* Reset the chip. */
         iowrite32(0x80000000, ioaddr + DMACtrl);
@@ -434,19 +436,22 @@ static int yellowfin_init_one(struct pci_dev *pdev,
         np->drv_flags = drv_flags;
         np->base = ioaddr;
 
-        ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+        ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+                                        GFP_KERNEL);
         if (!ring_space)
                 goto err_out_cleardev;
         np->tx_ring = ring_space;
         np->tx_ring_dma = ring_dma;
 
-        ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+        ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+                                        GFP_KERNEL);
         if (!ring_space)
                 goto err_out_unmap_tx;
         np->rx_ring = ring_space;
         np->rx_ring_dma = ring_dma;
 
-        ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
+        ring_space = dma_alloc_coherent(&pdev->dev, STATUS_TOTAL_SIZE,
+                                        &ring_dma, GFP_KERNEL);
         if (!ring_space)
                 goto err_out_unmap_rx;
         np->tx_status = ring_space;
@@ -505,12 +510,14 @@ static int yellowfin_init_one(struct pci_dev *pdev,
         return 0;
 
 err_out_unmap_status:
-        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
-                np->tx_status_dma);
+        dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
+                          np->tx_status_dma);
 err_out_unmap_rx:
-        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+        dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+                          np->rx_ring_dma);
 err_out_unmap_tx:
-        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+        dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+                          np->tx_ring_dma);
 err_out_cleardev:
         pci_iounmap(pdev, ioaddr);
 err_out_free_res:
@@ -632,10 +639,8 @@ static int yellowfin_open(struct net_device *dev)
         }
 
         /* Set the timer to check for link beat. */
-        init_timer(&yp->timer);
+        timer_setup(&yp->timer, yellowfin_timer, 0);
         yp->timer.expires = jiffies + 3*HZ;
-        yp->timer.data = (unsigned long)dev;
-        yp->timer.function = yellowfin_timer;          /* timer handler */
         add_timer(&yp->timer);
 out:
         return rc;
@@ -645,10 +650,10 @@ err_free_irq:
         goto out;
 }
 
-static void yellowfin_timer(unsigned long data)
+static void yellowfin_timer(struct timer_list *t)
 {
-        struct net_device *dev = (struct net_device *)data;
-        struct yellowfin_private *yp = netdev_priv(dev);
+        struct yellowfin_private *yp = timer_container_of(yp, t, timer);
+        struct net_device *dev = pci_get_drvdata(yp->pci_dev);
         void __iomem *ioaddr = yp->base;
         int next_tick = 60*HZ;
 
@@ -679,7 +684,7 @@ static void yellowfin_timer(unsigned long data)
         add_timer(&yp->timer);
 }
 
-static void yellowfin_tx_timeout(struct net_device *dev)
+static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
         struct yellowfin_private *yp = netdev_priv(dev);
         void __iomem *ioaddr = yp->base;
@@ -742,8 +747,10 @@ static int yellowfin_init_ring(struct net_device *dev)
                 if (skb == NULL)
                         break;
                 skb_reserve(skb, 2);    /* 16 byte align the IP header. */
-                yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-                        skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+                yp->rx_ring[i].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
+                                                                 skb->data,
+                                                                 yp->rx_buf_sz,
+                                                                 DMA_FROM_DEVICE));
         }
         if (i != RX_RING_SIZE) {
                 for (j = 0; j < i; j++)
@@ -833,8 +840,9 @@ static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
         yp->tx_skbuff[entry] = skb;
 
 #ifdef NO_TXSTATS
-        yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-                skb->data, len, PCI_DMA_TODEVICE));
+        yp->tx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
+                                                             skb->data,
+                                                             len, DMA_TO_DEVICE));
         yp->tx_ring[entry].result_status = 0;
         if (entry >= TX_RING_SIZE-1) {
                 /* New stop command. */
@@ -849,8 +857,9 @@ static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
         yp->cur_tx++;
 #else
         yp->tx_ring[entry<<1].request_cnt = len;
-        yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-                skb->data, len, PCI_DMA_TODEVICE));
+        yp->tx_ring[entry<<1].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
+                                                                skb->data,
+                                                                len, DMA_TO_DEVICE));
         /* The input_last (status-write) command is constant, but we must
            rewrite the subsequent 'stop' command. */
 
@@ -925,9 +934,10 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
                         dev->stats.tx_packets++;
                         dev->stats.tx_bytes += skb->len;
                         /* Free the original skb. */
-                        pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
-                                skb->len, PCI_DMA_TODEVICE);
-                        dev_kfree_skb_irq(skb);
+                        dma_unmap_single(&yp->pci_dev->dev,
+                                         le32_to_cpu(yp->tx_ring[entry].addr),
+                                         skb->len, DMA_TO_DEVICE);
+                        dev_consume_skb_irq(skb);
                         yp->tx_skbuff[entry] = NULL;
                 }
                 if (yp->tx_full &&
@@ -982,10 +992,10 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
                                 dev->stats.tx_packets++;
                         }
                         /* Free the original skb. */
-                        pci_unmap_single(yp->pci_dev,
-                                yp->tx_ring[entry<<1].addr, skb->len,
-                                PCI_DMA_TODEVICE);
-                        dev_kfree_skb_irq(skb);
+                        dma_unmap_single(&yp->pci_dev->dev,
+                                         yp->tx_ring[entry << 1].addr,
+                                         skb->len, DMA_TO_DEVICE);
+                        dev_consume_skb_irq(skb);
                         yp->tx_skbuff[entry] = 0;
                         /* Mark status as empty. */
                         yp->tx_status[entry].tx_errs = 0;
@@ -1052,13 +1062,14 @@ static int yellowfin_rx(struct net_device *dev)
                 struct sk_buff *rx_skb = yp->rx_skbuff[entry];
                 s16 frame_status;
                 u16 desc_status;
-                int data_size, yf_size;
+                int data_size, __maybe_unused yf_size;
                 u8 *buf_addr;
 
                 if(!desc->result_status)
                         break;
-                pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
-                        yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                dma_sync_single_for_cpu(&yp->pci_dev->dev,
+                                        le32_to_cpu(desc->addr),
+                                        yp->rx_buf_sz, DMA_FROM_DEVICE);
                 desc_status = le32_to_cpu(desc->result_status) >> 16;
                 buf_addr = rx_skb->data;
                 data_size = (le32_to_cpu(desc->dbdma_cmd) -
@@ -1123,10 +1134,10 @@ static int yellowfin_rx(struct net_device *dev)
                            without copying to a properly sized skbuff. */
                         if (pkt_len > rx_copybreak) {
                                 skb_put(skb = rx_skb, pkt_len);
-                                pci_unmap_single(yp->pci_dev,
-                                        le32_to_cpu(yp->rx_ring[entry].addr),
-                                        yp->rx_buf_sz,
-                                        PCI_DMA_FROMDEVICE);
+                                dma_unmap_single(&yp->pci_dev->dev,
+                                                 le32_to_cpu(yp->rx_ring[entry].addr),
+                                                 yp->rx_buf_sz,
+                                                 DMA_FROM_DEVICE);
                                 yp->rx_skbuff[entry] = NULL;
                         } else {
                                 skb = netdev_alloc_skb(dev, pkt_len + 2);
@@ -1135,10 +1146,10 @@ static int yellowfin_rx(struct net_device *dev)
                                 skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                 skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
                                 skb_put(skb, pkt_len);
-                                pci_dma_sync_single_for_device(yp->pci_dev,
-                                                le32_to_cpu(desc->addr),
-                                                yp->rx_buf_sz,
-                                                PCI_DMA_FROMDEVICE);
+                                dma_sync_single_for_device(&yp->pci_dev->dev,
+                                                           le32_to_cpu(desc->addr),
+                                                           yp->rx_buf_sz,
+                                                           DMA_FROM_DEVICE);
                         }
                         skb->protocol = eth_type_trans(skb, dev);
                         netif_rx(skb);
@@ -1157,8 +1168,10 @@ static int yellowfin_rx(struct net_device *dev)
                                 break;                  /* Better luck next round. */
                         yp->rx_skbuff[entry] = skb;
                         skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
-                        yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-                                skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+                        yp->rx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
+                                                                             skb->data,
+                                                                             yp->rx_buf_sz,
+                                                                             DMA_FROM_DEVICE));
                 }
                 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
                 yp->rx_ring[entry].result_status = 0;   /* Clear complete bit. */
@@ -1209,7 +1222,7 @@ static int yellowfin_close(struct net_device *dev)
         iowrite32(0x80000000, ioaddr + RxCtrl);
         iowrite32(0x80000000, ioaddr + TxCtrl);
 
-        del_timer(&yp->timer);
+        timer_delete(&yp->timer);
 
 #if defined(__i386__)
         if (yellowfin_debug > 2) {
@@ -1260,8 +1273,7 @@ static int yellowfin_close(struct net_device *dev)
                 yp->rx_skbuff[i] = NULL;
         }
         for (i = 0; i < TX_RING_SIZE; i++) {
-                if (yp->tx_skbuff[i])
-                        dev_kfree_skb(yp->tx_skbuff[i]);
+                dev_kfree_skb(yp->tx_skbuff[i]);
                 yp->tx_skbuff[i] = NULL;
         }
 
@@ -1328,9 +1340,9 @@ static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo
 {
         struct yellowfin_private *np = netdev_priv(dev);
 
-        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
-        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
-        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+        strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+        strscpy(info->version, DRV_VERSION, sizeof(info->version));
+        strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static const struct ethtool_ops ethtool_ops = {
@@ -1346,7 +1358,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
         switch(cmd) {
         case SIOCGMIIPHY:       /* Get address of MII PHY in use. */
                 data->phy_id = np->phys[0] & 0x1f;
-                /* Fall Through */
+                fallthrough;
 
         case SIOCGMIIREG:       /* Read MII PHY register. */
                 data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
@@ -1382,10 +1394,12 @@ static void yellowfin_remove_one(struct pci_dev *pdev)
         BUG_ON(!dev);
         np = netdev_priv(dev);
 
-        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
-                np->tx_status_dma);
-        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
-        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+        dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
+                          np->tx_status_dma);
+        dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+                          np->rx_ring_dma);
+        dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+                          np->tx_ring_dma);
         unregister_netdev (dev);
 
         pci_iounmap(pdev, np->base);
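Note: the least mechanical conversion in this diff is the timer API change. The old code stored the net_device in yp->timer.data and cast it back in the callback; with timer_setup() the callback receives the timer_list pointer itself and recovers its containing structure, while the net_device is looked up from the PCI driver data. A minimal sketch of that pattern, with hypothetical names modelled on the driver (not part of the patch):

/* Illustrative sketch only -- hypothetical names, not part of the patch. */
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/timer.h>

struct example_priv {
        struct timer_list timer;        /* embedded timer, as in yellowfin_private */
        struct pci_dev *pci_dev;
};

static void example_timer(struct timer_list *t)
{
        /* Recover the private struct from the embedded timer_list ... */
        struct example_priv *ep = timer_container_of(ep, t, timer);
        /* ... and the net_device from the PCI driver data, as the patch does. */
        struct net_device *dev = pci_get_drvdata(ep->pci_dev);

        netdev_dbg(dev, "periodic link check\n");
        mod_timer(&ep->timer, jiffies + 60 * HZ);       /* re-arm */
}

static void example_arm_timer(struct example_priv *ep)
{
        /* Replaces init_timer() plus manual .data/.function assignment. */
        timer_setup(&ep->timer, example_timer, 0);
        ep->timer.expires = jiffies + 3 * HZ;
        add_timer(&ep->timer);
}

On kernels that predate timer_container_of(), the same recovery is spelled from_timer(); the arguments are the same.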
