Diffstat (limited to 'drivers/net/ethernet/alteon/acenic.c')
-rw-r--r--	drivers/net/ethernet/alteon/acenic.c	210
1 file changed, 101 insertions, 109 deletions
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 5d192d551623..9e6f91df2ba0 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -465,6 +465,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	ap = netdev_priv(dev);
+	ap->ndev = dev;
 	ap->pdev = pdev;
 	ap->name = pci_name(pdev);
 
@@ -547,7 +548,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
 			       ap->name);
 			break;
 		}
-		/* Fall through */
+		fallthrough;
 	case PCI_VENDOR_ID_SGI:
 		printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
 		break;
@@ -588,8 +589,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
 	}
 
 	ap->name = dev->name;
 
-	if (ap->pci_using_dac)
-		dev->features |= NETIF_F_HIGHDMA;
+	dev->features |= NETIF_F_HIGHDMA;
 
 	pci_set_drvdata(pdev, dev);
 
@@ -642,9 +642,8 @@ static void acenic_remove_one(struct pci_dev *pdev)
 
 			ringp = &ap->skb->rx_std_skbuff[i];
 			mapping = dma_unmap_addr(ringp, mapping);
-			pci_unmap_page(ap->pdev, mapping,
-				       ACE_STD_BUFSIZE,
-				       PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&ap->pdev->dev, mapping,
+				       ACE_STD_BUFSIZE, DMA_FROM_DEVICE);
 
 			ap->rx_std_ring[i].size = 0;
 			ap->skb->rx_std_skbuff[i].skb = NULL;
@@ -662,9 +661,9 @@ static void acenic_remove_one(struct pci_dev *pdev)
 
 			ringp = &ap->skb->rx_mini_skbuff[i];
 			mapping = dma_unmap_addr(ringp,mapping);
-			pci_unmap_page(ap->pdev, mapping,
+			dma_unmap_page(&ap->pdev->dev, mapping,
 				       ACE_MINI_BUFSIZE,
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 
 			ap->rx_mini_ring[i].size = 0;
 			ap->skb->rx_mini_skbuff[i].skb = NULL;
@@ -681,9 +680,8 @@ static void acenic_remove_one(struct pci_dev *pdev)
 
 			ringp = &ap->skb->rx_jumbo_skbuff[i];
 			mapping = dma_unmap_addr(ringp, mapping);
-			pci_unmap_page(ap->pdev, mapping,
-				       ACE_JUMBO_BUFSIZE,
-				       PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&ap->pdev->dev, mapping,
+				       ACE_JUMBO_BUFSIZE, DMA_FROM_DEVICE);
 
 			ap->rx_jumbo_ring[i].size = 0;
 			ap->skb->rx_jumbo_skbuff[i].skb = NULL;
@@ -713,8 +711,8 @@ static void ace_free_descriptors(struct net_device *dev)
 			 RX_JUMBO_RING_ENTRIES +
 			 RX_MINI_RING_ENTRIES +
 			 RX_RETURN_RING_ENTRIES));
-		pci_free_consistent(ap->pdev, size, ap->rx_std_ring,
-				    ap->rx_ring_base_dma);
+		dma_free_coherent(&ap->pdev->dev, size, ap->rx_std_ring,
+				  ap->rx_ring_base_dma);
 		ap->rx_std_ring = NULL;
 		ap->rx_jumbo_ring = NULL;
 		ap->rx_mini_ring = NULL;
@@ -722,31 +720,30 @@ static void ace_free_descriptors(struct net_device *dev)
 	}
 	if (ap->evt_ring != NULL) {
 		size = (sizeof(struct event) * EVT_RING_ENTRIES);
-		pci_free_consistent(ap->pdev, size, ap->evt_ring,
-				    ap->evt_ring_dma);
+		dma_free_coherent(&ap->pdev->dev, size, ap->evt_ring,
+				  ap->evt_ring_dma);
 		ap->evt_ring = NULL;
 	}
 	if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
 		size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
-		pci_free_consistent(ap->pdev, size, ap->tx_ring,
-				    ap->tx_ring_dma);
+		dma_free_coherent(&ap->pdev->dev, size, ap->tx_ring,
+				  ap->tx_ring_dma);
 	}
 	ap->tx_ring = NULL;
 
 	if (ap->evt_prd != NULL) {
-		pci_free_consistent(ap->pdev, sizeof(u32),
-				    (void *)ap->evt_prd, ap->evt_prd_dma);
+		dma_free_coherent(&ap->pdev->dev, sizeof(u32),
+				  (void *)ap->evt_prd, ap->evt_prd_dma);
 		ap->evt_prd = NULL;
 	}
 	if (ap->rx_ret_prd != NULL) {
-		pci_free_consistent(ap->pdev, sizeof(u32),
-				    (void *)ap->rx_ret_prd,
-				    ap->rx_ret_prd_dma);
+		dma_free_coherent(&ap->pdev->dev, sizeof(u32),
+				  (void *)ap->rx_ret_prd, ap->rx_ret_prd_dma);
 		ap->rx_ret_prd = NULL;
 	}
 	if (ap->tx_csm != NULL) {
-		pci_free_consistent(ap->pdev, sizeof(u32),
-				    (void *)ap->tx_csm, ap->tx_csm_dma);
+		dma_free_coherent(&ap->pdev->dev, sizeof(u32),
+				  (void *)ap->tx_csm, ap->tx_csm_dma);
 		ap->tx_csm = NULL;
 	}
 }
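The hunks above are the release side of the coherent-DMA conversion: the pci_free_consistent()/pci_unmap_page() compatibility wrappers are replaced by direct calls to the generic DMA API on the underlying struct device. One behavioural detail: pci_alloc_consistent() implied GFP_ATOMIC, while the converted allocations pass GFP_KERNEL explicitly, which is fine in these non-atomic setup paths. A minimal sketch of the pattern, using illustrative names (my_nic, my_ring) rather than acenic's own:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/pci.h>

struct my_ring;				/* opaque descriptor ring type */

struct my_nic {
	struct pci_dev *pdev;
	struct my_ring *ring;		/* CPU address of the ring */
	dma_addr_t ring_dma;		/* device-visible bus address */
};

static int my_alloc_ring(struct my_nic *n, size_t size)
{
	/* was: n->ring = pci_alloc_consistent(n->pdev, size, &n->ring_dma);
	 * the wrapper implied GFP_ATOMIC; the gfp mask is now explicit */
	n->ring = dma_alloc_coherent(&n->pdev->dev, size, &n->ring_dma,
				     GFP_KERNEL);
	return n->ring ? 0 : -ENOMEM;
}

static void my_free_ring(struct my_nic *n, size_t size)
{
	dma_free_coherent(&n->pdev->dev, size, n->ring, n->ring_dma);
	n->ring = NULL;
}

The allocation-side counterparts in acenic itself follow in the next hunks.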
@@ -763,8 +760,8 @@ static int ace_allocate_descriptors(struct net_device *dev)
 		 RX_MINI_RING_ENTRIES +
 		 RX_RETURN_RING_ENTRIES));
 
-	ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size,
-					       &ap->rx_ring_base_dma);
+	ap->rx_std_ring = dma_alloc_coherent(&ap->pdev->dev, size,
+					     &ap->rx_ring_base_dma, GFP_KERNEL);
 	if (ap->rx_std_ring == NULL)
 		goto fail;
 
@@ -774,7 +771,8 @@
 
 	size = (sizeof(struct event) * EVT_RING_ENTRIES);
 
-	ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma);
+	ap->evt_ring = dma_alloc_coherent(&ap->pdev->dev, size,
+					  &ap->evt_ring_dma, GFP_KERNEL);
 
 	if (ap->evt_ring == NULL)
 		goto fail;
@@ -786,25 +784,25 @@
 	if (!ACE_IS_TIGON_I(ap)) {
 		size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
 
-		ap->tx_ring = pci_alloc_consistent(ap->pdev, size,
-						   &ap->tx_ring_dma);
+		ap->tx_ring = dma_alloc_coherent(&ap->pdev->dev, size,
+						 &ap->tx_ring_dma, GFP_KERNEL);
 
 		if (ap->tx_ring == NULL)
 			goto fail;
 	}
 
-	ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
-					   &ap->evt_prd_dma);
+	ap->evt_prd = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32),
+					 &ap->evt_prd_dma, GFP_KERNEL);
 	if (ap->evt_prd == NULL)
 		goto fail;
 
-	ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
-					      &ap->rx_ret_prd_dma);
+	ap->rx_ret_prd = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32),
+					    &ap->rx_ret_prd_dma, GFP_KERNEL);
 	if (ap->rx_ret_prd == NULL)
 		goto fail;
 
-	ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32),
-					  &ap->tx_csm_dma);
+	ap->tx_csm = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32),
+					&ap->tx_csm_dma, GFP_KERNEL);
 	if (ap->tx_csm == NULL)
 		goto fail;
 
@@ -830,8 +828,8 @@ static void ace_init_cleanup(struct net_device *dev)
 	ace_free_descriptors(dev);
 
 	if (ap->info)
-		pci_free_consistent(ap->pdev, sizeof(struct ace_info),
-				    ap->info, ap->info_dma);
+		dma_free_coherent(&ap->pdev->dev, sizeof(struct ace_info),
+				  ap->info, ap->info_dma);
 	kfree(ap->skb);
 	kfree(ap->trace_buf);
 
@@ -870,6 +868,7 @@ static int ace_init(struct net_device *dev)
 	int board_idx, ecode = 0;
 	short i;
 	unsigned char cache_size;
+	u8 addr[ETH_ALEN];
 
 	ap = netdev_priv(dev);
 	regs = ap->regs;
@@ -989,12 +988,13 @@ static int ace_init(struct net_device *dev)
 	writel(mac1, &regs->MacAddrHi);
 	writel(mac2, &regs->MacAddrLo);
 
-	dev->dev_addr[0] = (mac1 >> 8) & 0xff;
-	dev->dev_addr[1] = mac1 & 0xff;
-	dev->dev_addr[2] = (mac2 >> 24) & 0xff;
-	dev->dev_addr[3] = (mac2 >> 16) & 0xff;
-	dev->dev_addr[4] = (mac2 >> 8) & 0xff;
-	dev->dev_addr[5] = mac2 & 0xff;
+	addr[0] = (mac1 >> 8) & 0xff;
+	addr[1] = mac1 & 0xff;
+	addr[2] = (mac2 >> 24) & 0xff;
+	addr[3] = (mac2 >> 16) & 0xff;
+	addr[4] = (mac2 >> 8) & 0xff;
+	addr[5] = mac2 & 0xff;
+	eth_hw_addr_set(dev, addr);
 
 	printk("MAC: %pM\n", dev->dev_addr);
 
@@ -1129,11 +1129,7 @@ static int ace_init(struct net_device *dev)
 	/*
 	 * Configure DMA attributes.
 	 */
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		ap->pci_using_dac = 1;
-	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
-		ap->pci_using_dac = 0;
-	} else {
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		ecode = -ENODEV;
 		goto init_error;
 	}
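Two things happen in the hunks above besides the allocator conversion. First, the DMA-mask setup collapses to a single dma_set_mask() call: with NETIF_F_HIGHDMA now set unconditionally there is no pci_using_dac bookkeeping, so a device that cannot address 64 bits is simply rejected. Second, the MAC address is assembled in a local buffer and installed with eth_hw_addr_set(), because dev->dev_addr is const and may only be written through the core helpers. A sketch of that second pattern, where mac1/mac2 stand in for the values read back from the NIC's MacAddrHi/MacAddrLo registers and the function name is hypothetical:

#include <linux/etherdevice.h>

static void my_set_hw_addr(struct net_device *dev, u32 mac1, u32 mac2)
{
	u8 addr[ETH_ALEN];

	addr[0] = (mac1 >> 8) & 0xff;
	addr[1] = mac1 & 0xff;
	addr[2] = (mac2 >> 24) & 0xff;
	addr[3] = (mac2 >> 16) & 0xff;
	addr[4] = (mac2 >> 8) & 0xff;
	addr[5] = mac2 & 0xff;

	/* dev->dev_addr is const; this helper is the sanctioned writer */
	eth_hw_addr_set(dev, addr);
}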
@@ -1143,8 +1139,8 @@
 	 * and the control blocks for the transmit and receive rings
 	 * as they need to be setup once and for all.
 	 */
-	if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
-					  &ap->info_dma))) {
+	if (!(info = dma_alloc_coherent(&ap->pdev->dev, sizeof(struct ace_info),
+					&ap->info_dma, GFP_KERNEL))) {
 		ecode = -EAGAIN;
 		goto init_error;
 	}
@@ -1153,7 +1149,7 @@
 	/*
 	 * Get the memory for the skb rings.
 	 */
-	if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
+	if (!(ap->skb = kzalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
 		ecode = -EAGAIN;
 		goto init_error;
 	}
@@ -1174,9 +1170,6 @@ static int ace_init(struct net_device *dev)
 	ap->last_mini_rx = 0;
 #endif
 
-	memset(ap->info, 0, sizeof(struct ace_info));
-	memset(ap->skb, 0, sizeof(struct ace_skb));
-
 	ecode = ace_load_firmware(dev);
 	if (ecode)
 		goto init_error;
@@ -1567,10 +1560,10 @@ static void ace_watchdog(struct net_device *data, unsigned int txqueue)
 }
 
 
-static void ace_tasklet(unsigned long arg)
+static void ace_bh_work(struct work_struct *work)
 {
-	struct net_device *dev = (struct net_device *) arg;
-	struct ace_private *ap = netdev_priv(dev);
+	struct ace_private *ap = from_work(ap, work, ace_bh_work);
+	struct net_device *dev = ap->ndev;
 	int cur_size;
 
 	cur_size = atomic_read(&ap->cur_rx_bufs);
@@ -1602,7 +1595,7 @@
 #endif
 		ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
 	}
-	ap->tasklet_pending = 0;
+	ap->bh_work_pending = 0;
 }
 
 
@@ -1624,7 +1617,7 @@
  *
  * Loading rings is safe without holding the spin lock since this is
  * done only before the device is enabled, thus no interrupts are
- * generated and by the interrupt handler/tasklet handler.
+ * generated and by the interrupt handler/bh handler.
  */
 static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
 {
@@ -1646,10 +1639,10 @@ static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
 		if (!skb)
 			break;
 
-		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+		mapping = dma_map_page(&ap->pdev->dev,
+				       virt_to_page(skb->data),
 				       offset_in_page(skb->data),
-				       ACE_STD_BUFSIZE,
-				       PCI_DMA_FROMDEVICE);
+				       ACE_STD_BUFSIZE, DMA_FROM_DEVICE);
 		ap->skb->rx_std_skbuff[idx].skb = skb;
 		dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
 				   mapping, mapping);
@@ -1707,10 +1700,10 @@ static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs)
 		if (!skb)
 			break;
 
-		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+		mapping = dma_map_page(&ap->pdev->dev,
+				       virt_to_page(skb->data),
 				       offset_in_page(skb->data),
-				       ACE_MINI_BUFSIZE,
-				       PCI_DMA_FROMDEVICE);
+				       ACE_MINI_BUFSIZE, DMA_FROM_DEVICE);
 		ap->skb->rx_mini_skbuff[idx].skb = skb;
 		dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
 				   mapping, mapping);
@@ -1763,10 +1756,10 @@ static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs)
 		if (!skb)
 			break;
 
-		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+		mapping = dma_map_page(&ap->pdev->dev,
+				       virt_to_page(skb->data),
 				       offset_in_page(skb->data),
-				       ACE_JUMBO_BUFSIZE,
-				       PCI_DMA_FROMDEVICE);
+				       ACE_JUMBO_BUFSIZE, DMA_FROM_DEVICE);
 		ap->skb->rx_jumbo_skbuff[idx].skb = skb;
 		dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
 				   mapping, mapping);
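The @@ -1567 hunk converts the driver's tasklet into a work item intended for the BH workqueue. The handler no longer gets the net_device smuggled through an unsigned long; it recovers its context with from_work(), a container_of() wrapper, which is why the probe path gained the ap->ndev back-pointer in the first hunk of this patch. A reduced sketch with illustrative names (my_priv, my_bh_work):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_priv {
	struct net_device *ndev;	/* back-pointer, cf. ap->ndev */
	struct work_struct bh_work;
};

static void my_bh_work(struct work_struct *work)
{
	/* from_work() maps the work_struct back to its container */
	struct my_priv *p = from_work(p, work, bh_work);
	struct net_device *dev = p->ndev;

	/* refill RX rings etc.; runs in softirq context when the item
	 * was queued on system_bh_wq */
	netdev_dbg(dev, "bottom-half work ran\n");
}

The scheduling and teardown sides of this conversion appear in the hunks below.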
@@ -1887,16 +1880,16 @@ static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
 			}
 		}
 
-                if (ACE_IS_TIGON_I(ap)) {
-                        struct cmd cmd;
-                        cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
-                        cmd.code = 0;
-                        cmd.idx = 0;
-                        ace_issue_cmd(ap->regs, &cmd);
-                } else {
-                        writel(0, &((ap->regs)->RxJumboPrd));
-                        wmb();
-                }
+		if (ACE_IS_TIGON_I(ap)) {
+			struct cmd cmd;
+			cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
+			cmd.code = 0;
+			cmd.idx = 0;
+			ace_issue_cmd(ap->regs, &cmd);
+		} else {
+			writel(0, &((ap->regs)->RxJumboPrd));
+			wmb();
+		}
 
 		ap->jumbo = 0;
 		ap->rx_jumbo_skbprd = 0;
@@ -1977,10 +1970,8 @@
 
 		skb = rip->skb;
 		rip->skb = NULL;
-		pci_unmap_page(ap->pdev,
-			       dma_unmap_addr(rip, mapping),
-			       mapsize,
-			       PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&ap->pdev->dev, dma_unmap_addr(rip, mapping),
+			       mapsize, DMA_FROM_DEVICE);
 		skb_put(skb, retdesc->size);
 
 		/*
@@ -2046,9 +2037,10 @@ static inline void ace_tx_int(struct net_device *dev,
 		skb = info->skb;
 
 		if (dma_unmap_len(info, maplen)) {
-			pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
+			dma_unmap_page(&ap->pdev->dev,
+				       dma_unmap_addr(info, mapping),
 				       dma_unmap_len(info, maplen),
-				       PCI_DMA_TODEVICE);
+				       DMA_TO_DEVICE);
 			dma_unmap_len_set(info, maplen, 0);
 		}
 
@@ -2168,7 +2160,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
 	 */
 	if (netif_running(dev)) {
 		int cur_size;
-		int run_tasklet = 0;
+		int run_bh_work = 0;
 
 		cur_size = atomic_read(&ap->cur_rx_bufs);
 		if (cur_size < RX_LOW_STD_THRES) {
@@ -2180,7 +2172,7 @@
 					ace_load_std_rx_ring(dev,
 							     RX_RING_SIZE - cur_size);
 			} else
-				run_tasklet = 1;
+				run_bh_work = 1;
 		}
 
 		if (!ACE_IS_TIGON_I(ap)) {
@@ -2196,7 +2188,7 @@
 					ace_load_mini_rx_ring(dev,
 							      RX_MINI_SIZE - cur_size);
 				} else
-					run_tasklet = 1;
+					run_bh_work = 1;
 			}
 		}
 
@@ -2213,12 +2205,12 @@
 					ace_load_jumbo_rx_ring(dev,
 							       RX_JUMBO_SIZE - cur_size);
 				} else
-					run_tasklet = 1;
+					run_bh_work = 1;
 			}
 		}
 
-		if (run_tasklet && !ap->tasklet_pending) {
-			ap->tasklet_pending = 1;
-			tasklet_schedule(&ap->ace_tasklet);
+		if (run_bh_work && !ap->bh_work_pending) {
+			ap->bh_work_pending = 1;
+			queue_work(system_bh_wq, &ap->ace_bh_work);
 		}
 	}
 
@@ -2275,7 +2267,7 @@ static int ace_open(struct net_device *dev)
 	/*
 	 * Setup the bottom half rx ring refill handler
 	 */
-	tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev);
+	INIT_WORK(&ap->ace_bh_work, ace_bh_work);
 
 	return 0;
 }
@@ -2309,7 +2301,7 @@ static int ace_close(struct net_device *dev)
 	cmd.idx = 0;
 	ace_issue_cmd(regs, &cmd);
 
-	tasklet_kill(&ap->ace_tasklet);
+	cancel_work_sync(&ap->ace_bh_work);
 
 	/*
 	 * Make sure one CPU is not processing packets while
@@ -2337,9 +2329,10 @@ static int ace_close(struct net_device *dev)
 			} else
 				memset(ap->tx_ring + i, 0,
 				       sizeof(struct tx_desc));
-			pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
+			dma_unmap_page(&ap->pdev->dev,
+				       dma_unmap_addr(info, mapping),
 				       dma_unmap_len(info, maplen),
-				       PCI_DMA_TODEVICE);
+				       DMA_TO_DEVICE);
 			dma_unmap_len_set(info, maplen, 0);
 		}
 		if (skb) {
@@ -2369,9 +2362,9 @@ ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
 	dma_addr_t mapping;
 	struct tx_ring_info *info;
 
-	mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
-			       offset_in_page(skb->data),
-			       skb->len, PCI_DMA_TODEVICE);
+	mapping = dma_map_page(&ap->pdev->dev, virt_to_page(skb->data),
+			       offset_in_page(skb->data), skb->len,
+			       DMA_TO_DEVICE);
 
 	info = ap->skb->tx_skbuff + idx;
 	info->skb = tail;
@@ -2442,7 +2435,7 @@ restart:
 	} else {
 		dma_addr_t mapping;
 		u32 vlan_tag = 0;
-		int i, len = 0;
+		int i;
 
 		mapping = ace_map_tx_skb(ap, skb, NULL, idx);
 		flagsize = (skb_headlen(skb) << 16);
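The plumbing above mirrors the old tasklet lifecycle one-for-one: INIT_WORK() in ndo_open replaces tasklet_init(), queue_work() on system_bh_wq (whose items execute in softirq context, preserving the tasklet's semantics) replaces tasklet_schedule(), and cancel_work_sync() in ndo_stop replaces tasklet_kill(). A condensed sketch, reusing my_priv and my_bh_work from the previous example:

#include <linux/interrupt.h>

static int my_open(struct net_device *dev)
{
	struct my_priv *p = netdev_priv(dev);

	INIT_WORK(&p->bh_work, my_bh_work);	/* was tasklet_init() */
	return 0;
}

static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct my_priv *p = netdev_priv(dev);

	/* drop-in replacement for tasklet_schedule() */
	queue_work(system_bh_wq, &p->bh_work);
	return IRQ_HANDLED;
}

static int my_stop(struct net_device *dev)
{
	struct my_priv *p = netdev_priv(dev);

	cancel_work_sync(&p->bh_work);		/* was tasklet_kill() */
	return 0;
}

The remaining hunks finish the TX path and the ethtool and MAC-address cleanups.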
@@ -2461,7 +2454,6 @@ restart:
 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 			struct tx_ring_info *info;
 
-			len += skb_frag_size(frag);
 			info = ap->skb->tx_skbuff + idx;
 			desc = ap->tx_ring + idx;
 
@@ -2493,9 +2485,9 @@ restart:
 		}
 	}
 
- 	wmb();
- 	ap->tx_prd = idx;
- 	ace_set_txprd(regs, ap, idx);
+	wmb();
+	ap->tx_prd = idx;
+	ace_set_txprd(regs, ap, idx);
 
 	if (flagsize & BD_FLG_COAL_NOW) {
 		netif_stop_queue(dev);
@@ -2547,7 +2539,7 @@ static int ace_change_mtu(struct net_device *dev, int new_mtu)
 	struct ace_regs __iomem *regs = ap->regs;
 
 	writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
-	dev->mtu = new_mtu;
+	WRITE_ONCE(dev->mtu, new_mtu);
 
 	if (new_mtu > ACE_STD_MTU) {
 		if (!(ap->jumbo)) {
@@ -2698,12 +2690,12 @@ static void ace_get_drvinfo(struct net_device *dev,
 {
 	struct ace_private *ap = netdev_priv(dev);
 
-	strlcpy(info->driver, "acenic", sizeof(info->driver));
+	strscpy(info->driver, "acenic", sizeof(info->driver));
 	snprintf(info->fw_version, sizeof(info->version), "%i.%i.%i",
 		 ap->firmware_major, ap->firmware_minor, ap->firmware_fix);
 
 	if (ap->pdev)
-		strlcpy(info->bus_info, pci_name(ap->pdev),
+		strscpy(info->bus_info, pci_name(ap->pdev),
 			sizeof(info->bus_info));
 }
 
@@ -2716,15 +2708,15 @@ static int ace_set_mac_addr(struct net_device *dev, void *p)
 	struct ace_private *ap = netdev_priv(dev);
 	struct ace_regs __iomem *regs = ap->regs;
 	struct sockaddr *addr=p;
-	u8 *da;
+	const u8 *da;
 	struct cmd cmd;
 
 	if(netif_running(dev))
 		return -EBUSY;
 
-	memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
+	eth_hw_addr_set(dev, addr->sa_data);
 
-	da = (u8 *)dev->dev_addr;
+	da = (const u8 *)dev->dev_addr;
 
 	writel(da[0] << 8 | da[1], &regs->MacAddrHi);
 	writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
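The streaming-DMA half of the conversion, visible in the TX hunks above and the RX-refill hunks earlier, follows the same shape throughout: pci_map_page()/pci_unmap_page() with PCI_DMA_{TO,FROM}DEVICE become dma_map_page()/dma_unmap_page() on &pdev->dev with enum dma_data_direction values. A sketch of the TX-side pattern; unlike this legacy driver, new code should also check dma_mapping_error():

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

/* Map an skb's linear data for transmit, as ace_map_tx_skb() does.
 * The caller must keep `mapping` and the length for the later unmap. */
static dma_addr_t my_map_tx(struct device *dev, struct sk_buff *skb)
{
	dma_addr_t mapping;

	mapping = dma_map_page(dev, virt_to_page(skb->data),
			       offset_in_page(skb->data), skb->len,
			       DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mapping))
		return DMA_MAPPING_ERROR;	/* caller must bail out */
	return mapping;
}

static void my_unmap_tx(struct device *dev, dma_addr_t mapping, size_t len)
{
	dma_unmap_page(dev, mapping, len, DMA_TO_DEVICE);
}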
