Diffstat (limited to 'drivers/net/ethernet/cavium/thunder/thunder_bgx.c')
-rw-r--r--	drivers/net/ethernet/cavium/thunder/thunder_bgx.c	442
1 file changed, 354 insertions, 88 deletions
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 5e5c4d7796b8..9efb60842ad1 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2015 Cavium, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
  */
 
 #include <linux/acpi.h>
@@ -21,12 +18,34 @@
 #include "nic.h"
 #include "thunder_bgx.h"
 
-#define DRV_NAME	"thunder-BGX"
+#define DRV_NAME	"thunder_bgx"
 #define DRV_VERSION	"1.0"
 
+/* RX_DMAC_CTL configuration */
+enum MCAST_MODE {
+	MCAST_MODE_REJECT = 0x0,
+	MCAST_MODE_ACCEPT = 0x1,
+	MCAST_MODE_CAM_FILTER = 0x2,
+	RSVD = 0x3
+};
+
+#define BCAST_ACCEPT      BIT(0)
+#define CAM_ACCEPT        BIT(3)
+#define MCAST_MODE_MASK   0x3
+#define BGX_MCAST_MODE(x) (x << 1)
+
+struct dmac_map {
+	u64			vf_map;
+	u64			dmac;
+};
+
 struct lmac {
 	struct bgx		*bgx;
-	int			dmac;
+	/* actual number of DMACs configured */
+	u8			dmacs_cfg;
+	/* overal number of possible DMACs could be configured per LMAC */
+	u8			dmacs_count;
+	struct dmac_map		*dmacs; /* DMAC:VFs tracking filter array */
 	u8			mac[ETH_ALEN];
 	u8			lmac_type;
 	u8			lane_to_sds;
@@ -35,7 +54,7 @@ struct lmac {
 	bool			link_up;
 	int			lmacid; /* ID within BGX */
 	int			lmacid_bd; /* ID on board */
-	struct net_device	netdev;
+	struct net_device	*netdev;
 	struct phy_device	*phydev;
 	unsigned int		last_duplex;
 	unsigned int		last_link;
@@ -223,6 +242,163 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
 }
 EXPORT_SYMBOL(bgx_set_lmac_mac);
 
+static void bgx_flush_dmac_cam_filter(struct bgx *bgx, int lmacid)
+{
+	struct lmac *lmac = NULL;
+	u8 idx = 0;
+
+	lmac = &bgx->lmac[lmacid];
+	/* reset CAM filters */
+	for (idx = 0; idx < lmac->dmacs_count; idx++)
+		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM +
+			      ((lmacid * lmac->dmacs_count) + idx) *
+			      sizeof(u64), 0);
+}
+
+static void bgx_lmac_remove_filters(struct lmac *lmac, u8 vf_id)
+{
+	int i = 0;
+
+	if (!lmac)
+		return;
+
+	/* We've got reset filters request from some of attached VF, while the
+	 * others might want to keep their configuration. So in this case lets
+	 * iterate over all of configured filters and decrease number of
+	 * referencies. if some addresses get zero refs remove them from list
+	 */
+	for (i = lmac->dmacs_cfg - 1; i >= 0; i--) {
+		lmac->dmacs[i].vf_map &= ~BIT_ULL(vf_id);
+		if (!lmac->dmacs[i].vf_map) {
+			lmac->dmacs_cfg--;
+			lmac->dmacs[i].dmac = 0;
+			lmac->dmacs[i].vf_map = 0;
+		}
+	}
+}
+
+static int bgx_lmac_save_filter(struct lmac *lmac, u64 dmac, u8 vf_id)
+{
+	u8 i = 0;
+
+	if (!lmac)
+		return -1;
+
+	/* At the same time we could have several VFs 'attached' to some
+	 * particular LMAC, and each VF is represented as network interface
+	 * for kernel. So from user perspective it should be possible to
+	 * manipulate with its' (VF) receive modes. However from PF
+	 * driver perspective we need to keep track of filter configurations
+	 * for different VFs to prevent filter values dupes
+	 */
+	for (i = 0; i < lmac->dmacs_cfg; i++) {
+		if (lmac->dmacs[i].dmac == dmac) {
+			lmac->dmacs[i].vf_map |= BIT_ULL(vf_id);
+			return -1;
+		}
+	}
+
+	if (!(lmac->dmacs_cfg < lmac->dmacs_count))
+		return -1;
+
+	/* keep it for further tracking */
+	lmac->dmacs[lmac->dmacs_cfg].dmac = dmac;
+	lmac->dmacs[lmac->dmacs_cfg].vf_map = BIT_ULL(vf_id);
+	lmac->dmacs_cfg++;
+	return 0;
+}
+
+static int bgx_set_dmac_cam_filter_mac(struct bgx *bgx, int lmacid,
+				       u64 cam_dmac, u8 idx)
+{
+	struct lmac *lmac = NULL;
+	u64 cfg = 0;
+
+	/* skip zero addresses as meaningless */
+	if (!cam_dmac || !bgx)
+		return -1;
+
+	lmac = &bgx->lmac[lmacid];
+
+	/* configure DCAM filtering for designated LMAC */
+	cfg = RX_DMACX_CAM_LMACID(lmacid & LMAC_ID_MASK) |
+		RX_DMACX_CAM_EN | cam_dmac;
+	bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM +
+		      ((lmacid * lmac->dmacs_count) + idx) * sizeof(u64), cfg);
+	return 0;
+}
+
+void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid,
+			     u64 cam_dmac, u8 vf_id)
+{
+	struct bgx *bgx = get_bgx(node, bgx_idx);
+	struct lmac *lmac = NULL;
+
+	if (!bgx)
+		return;
+
+	lmac = &bgx->lmac[lmacid];
+
+	if (!cam_dmac)
+		cam_dmac = ether_addr_to_u64(lmac->mac);
+
+	/* since we might have several VFs attached to particular LMAC
+	 * and kernel could call mcast config for each of them with the
+	 * same MAC, check if requested MAC is already in filtering list and
+	 * updare/prepare list of MACs to be applied later to HW filters
+	 */
+	bgx_lmac_save_filter(lmac, cam_dmac, vf_id);
+}
+EXPORT_SYMBOL(bgx_set_dmac_cam_filter);
+
+void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode)
+{
+	struct bgx *bgx = get_bgx(node, bgx_idx);
+	struct lmac *lmac = NULL;
+	u64 cfg = 0;
+	u8 i = 0;
+
+	if (!bgx)
+		return;
+
+	lmac = &bgx->lmac[lmacid];
+
+	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL);
+	if (mode & BGX_XCAST_BCAST_ACCEPT)
+		cfg |= BCAST_ACCEPT;
+	else
+		cfg &= ~BCAST_ACCEPT;
+
+	/* disable all MCASTs and DMAC filtering */
+	cfg &= ~(CAM_ACCEPT | BGX_MCAST_MODE(MCAST_MODE_MASK));
+
+	/* check requested bits and set filtergin mode appropriately */
+	if (mode & (BGX_XCAST_MCAST_ACCEPT)) {
+		cfg |= (BGX_MCAST_MODE(MCAST_MODE_ACCEPT));
+	} else if (mode & BGX_XCAST_MCAST_FILTER) {
+		cfg |= (BGX_MCAST_MODE(MCAST_MODE_CAM_FILTER) | CAM_ACCEPT);
+		for (i = 0; i < lmac->dmacs_cfg; i++)
+			bgx_set_dmac_cam_filter_mac(bgx, lmacid,
+						    lmac->dmacs[i].dmac, i);
+	}
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, cfg);
+}
+EXPORT_SYMBOL(bgx_set_xcast_mode);
+
+void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf_id)
+{
+	struct bgx *bgx = get_bgx(node, bgx_idx);
+
+	if (!bgx)
+		return;
+
+	bgx_lmac_remove_filters(&bgx->lmac[lmacid], vf_id);
+	bgx_flush_dmac_cam_filter(bgx, lmacid);
+	bgx_set_xcast_mode(node, bgx_idx, lmacid,
+			   (BGX_XCAST_BCAST_ACCEPT | BGX_XCAST_MCAST_ACCEPT));
+}
+EXPORT_SYMBOL(bgx_reset_xcast_mode);
+
 void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
 {
 	struct bgx *bgx = get_bgx(node, bgx_idx);
@@ -234,10 +410,19 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
 	lmac = &bgx->lmac[lmacid];
 
 	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
-	if (enable)
+	if (enable) {
 		cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
-	else
+
+		/* enable TX FIFO Underflow interrupt */
+		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1S,
+			       GMI_TXX_INT_UNDFLW);
+	} else {
 		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+
+		/* Disable TX FIFO Underflow interrupt */
+		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1C,
+			       GMI_TXX_INT_UNDFLW);
+	}
 	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
 
 	if (bgx->is_rgx)
@@ -245,6 +430,35 @@
 }
 EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
 
+/* Enables or disables timestamp insertion by BGX for Rx packets */
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable)
+{
+	struct bgx *bgx = get_bgx(node, bgx_idx);
+	struct lmac *lmac;
+	u64 csr_offset, cfg;
+
+	if (!bgx)
+		return;
+
+	lmac = &bgx->lmac[lmacid];
+
+	if (lmac->lmac_type == BGX_MODE_SGMII ||
+	    lmac->lmac_type == BGX_MODE_QSGMII ||
+	    lmac->lmac_type == BGX_MODE_RGMII)
+		csr_offset = BGX_GMP_GMI_RXX_FRM_CTL;
+	else
+		csr_offset = BGX_SMUX_RX_FRM_CTL;
+
+	cfg = bgx_reg_read(bgx, lmacid, csr_offset);
+
+	if (enable)
+		cfg |= BGX_PKT_RX_PTP_EN;
+	else
+		cfg &= ~BGX_PKT_RX_PTP_EN;
+	bgx_reg_write(bgx, lmacid, csr_offset, cfg);
+}
+EXPORT_SYMBOL(bgx_config_timestamping);
+
 void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
 {
 	struct pfc *pfc = (struct pfc *)pause;
@@ -376,13 +590,12 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
 
 static void bgx_lmac_handler(struct net_device *netdev)
 {
-	struct lmac *lmac = container_of(netdev, struct lmac, netdev);
 	struct phy_device *phydev;
+	struct lmac *lmac, **priv;
 	int link_changed = 0;
 
-	if (!lmac)
-		return;
-
+	priv = netdev_priv(netdev);
+	lmac = *priv;
 	phydev = lmac->phydev;
 
 	if (!phydev->link && lmac->last_link)
@@ -439,18 +652,6 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
 }
 EXPORT_SYMBOL(bgx_get_tx_stats);
 
-static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
-{
-	u64 offset;
-
-	while (bgx->lmac[lmac].dmac > 0) {
-		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
-			(lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
-		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
-		bgx->lmac[lmac].dmac--;
-	}
-}
-
 /* Configure BGX LMAC in internal loopback mode */
 void bgx_lmac_internal_loopback(int node, int bgx_idx,
 				int lmac_idx, bool enable)
@@ -766,13 +967,13 @@ static void bgx_poll_for_sgmii_link(struct lmac *lmac)
 	lmac->last_duplex = (an_result >> 1) & 0x1;
 	switch (speed) {
 	case 0:
-		lmac->last_speed = 10;
+		lmac->last_speed = SPEED_10;
 		break;
 	case 1:
-		lmac->last_speed = 100;
+		lmac->last_speed = SPEED_100;
 		break;
 	case 2:
-		lmac->last_speed = 1000;
+		lmac->last_speed = SPEED_1000;
 		break;
 	default:
 		lmac->link_up = false;
@@ -814,14 +1015,14 @@ static void bgx_poll_for_link(struct work_struct *work)
 
 	if ((spu_link & SPU_STATUS1_RCV_LNK) &&
 	    !(smu_link & SMU_RX_CTL_STATUS)) {
-		lmac->link_up = 1;
+		lmac->link_up = true;
 		if (lmac->lmac_type == BGX_MODE_XLAUI)
-			lmac->last_speed = 40000;
+			lmac->last_speed = SPEED_40000;
 		else
-			lmac->last_speed = 10000;
-		lmac->last_duplex = 1;
+			lmac->last_speed = SPEED_10000;
+		lmac->last_duplex = DUPLEX_FULL;
 	} else {
-		lmac->link_up = 0;
+		lmac->link_up = false;
 		lmac->last_speed = SPEED_UNKNOWN;
 		lmac->last_duplex = DUPLEX_UNKNOWN;
 	}
@@ -830,7 +1031,7 @@
 	if (lmac->link_up) {
 		if (bgx_xaui_check_link(lmac)) {
 			/* Errors, clear link_up state */
-			lmac->link_up = 0;
+			lmac->link_up = false;
 			lmac->last_speed = SPEED_UNKNOWN;
 			lmac->last_duplex = DUPLEX_UNKNOWN;
 		}
@@ -846,7 +1047,7 @@ static int phy_interface_mode(u8 lmac_type)
 	if (lmac_type == BGX_MODE_QSGMII)
 		return PHY_INTERFACE_MODE_QSGMII;
 	if (lmac_type == BGX_MODE_RGMII)
-		return PHY_INTERFACE_MODE_RGMII;
+		return PHY_INTERFACE_MODE_RGMII_RXID;
 
 	return PHY_INTERFACE_MODE_SGMII;
 }
@@ -862,11 +1063,11 @@
 	if ((lmac->lmac_type == BGX_MODE_SGMII) ||
 	    (lmac->lmac_type == BGX_MODE_QSGMII) ||
 	    (lmac->lmac_type == BGX_MODE_RGMII)) {
-		lmac->is_sgmii = 1;
+		lmac->is_sgmii = true;
 		if (bgx_lmac_sgmii_init(bgx, lmac))
 			return -1;
 	} else {
-		lmac->is_sgmii = 0;
+		lmac->is_sgmii = false;
 		if (bgx_lmac_xaui_init(bgx, lmac))
 			return -1;
 	}
@@ -883,6 +1084,13 @@
 		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
 	}
 
+	/* actual number of filters available to exact LMAC */
+	lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
+	lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
+			      GFP_KERNEL);
+	if (!lmac->dmacs)
+		return -ENOMEM;
+
 	/* Enable lmac */
 	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
 
@@ -902,26 +1110,25 @@
 			} else {
 				/* Default to below link speed and duplex */
 				lmac->link_up = true;
-				lmac->last_speed = 1000;
-				lmac->last_duplex = 1;
+				lmac->last_speed = SPEED_1000;
+				lmac->last_duplex = DUPLEX_FULL;
 				bgx_sgmii_change_link_state(lmac);
 				return 0;
 			}
 		}
 		lmac->phydev->dev_flags = 0;
 
-		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
+		if (phy_connect_direct(lmac->netdev, lmac->phydev,
 				       bgx_lmac_handler,
 				       phy_interface_mode(lmac->lmac_type)))
 			return -ENODEV;
 
-		phy_start_aneg(lmac->phydev);
+		phy_start(lmac->phydev);
 		return 0;
 	}
 
 poll:
-	lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
-					   WQ_MEM_RECLAIM, 1);
+	lmac->check_link = alloc_ordered_workqueue("check_link", WQ_MEM_RECLAIM);
 	if (!lmac->check_link)
 		return -ENOMEM;
 	INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
@@ -969,7 +1176,8 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
 	cfg &= ~CMR_EN;
 	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
 
-	bgx_flush_dmac_addrs(bgx, lmacid);
+	bgx_flush_dmac_cam_filter(bgx, lmacid);
+	kfree(lmac->dmacs);
 
 	if ((lmac->lmac_type != BGX_MODE_XFI) &&
 	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
@@ -1013,7 +1221,7 @@ static void bgx_init_hw(struct bgx *bgx)
 
 	/* Disable MAC steering (NCSI traffic) */
 	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
-		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
+		bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00);
 }
 
 static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
@@ -1103,7 +1311,7 @@ static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
 {
 	if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
 	    (lmac->lmac_type != BGX_MODE_40G_KR)) {
-		lmac->use_training = 0;
+		lmac->use_training = false;
 		return;
 	}
 
@@ -1182,22 +1390,16 @@ static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
 {
 	u8 mac[ETH_ALEN];
 	int ret;
 
-	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
-					    "mac-address", mac, ETH_ALEN);
-	if (ret)
-		goto out;
-
-	if (!is_valid_ether_addr(mac)) {
+	ret = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac);
+	if (ret) {
 		dev_err(dev, "MAC address invalid: %pM\n", mac);
-		ret = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	dev_info(dev, "MAC address set to: %pM\n", mac);
-	memcpy(dst, mac, ETH_ALEN);
-out:
-	return ret;
+	ether_addr_copy(dst, mac);
+	return 0;
 }
 
 /* Currently only sets the MAC address. */
@@ -1208,12 +1410,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
 	struct device *dev = &bgx->pdev->dev;
 	struct acpi_device *adev;
 
-	if (acpi_bus_get_device(handle, &adev))
+	adev = acpi_fetch_acpi_dev(handle);
+	if (!adev)
 		goto out;
 
 	acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
 
-	SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
+	SET_NETDEV_DEV(bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
 
 	bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
 	bgx->acpi_lmac_idx++; /* move to next LMAC */
@@ -1226,16 +1429,18 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
 {
 	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
 	struct bgx *bgx = context;
-	char bgx_sel[5];
+	char bgx_sel[7];
 
-	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
+	snprintf(bgx_sel, sizeof(bgx_sel), "BGX%d", bgx->bgx_id);
 	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
 		pr_warn("Invalid link device\n");
 		return AE_OK;
 	}
 
-	if (strncmp(string.pointer, bgx_sel, 4))
+	if (strncmp(string.pointer, bgx_sel, 4)) {
+		kfree(string.pointer);
 		return AE_OK;
+	}
 
 	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
 			    bgx_acpi_register_phy, NULL, bgx, NULL);
@@ -1270,7 +1475,6 @@ static int bgx_init_of_phy(struct bgx *bgx)
 	device_for_each_child_node(&bgx->pdev->dev, fwn) {
 		struct phy_device *pd;
 		struct device_node *phy_np;
-		const char *mac;
 
 		/* Should always be an OF node.  But if it is not, we
 		 * cannot handle it, so exit the loop.
@@ -1279,11 +1483,9 @@
 		if (!node)
 			break;
 
-		mac = of_get_mac_address(node);
-		if (mac)
-			ether_addr_copy(bgx->lmac[lmac].mac, mac);
+		of_get_mac_address(node, bgx->lmac[lmac].mac);
 
-		SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
+		SET_NETDEV_DEV(bgx->lmac[lmac].netdev, &bgx->pdev->dev);
 
 		bgx->lmac[lmac].lmacid = lmac;
 		phy_np = of_parse_phandle(node, "phy-handle", 0);
@@ -1291,13 +1493,17 @@
 		 * this cortina phy, for which there is no driver
 		 * support, ignore it.
		 */
-		if (phy_np &&
-		    !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
-			/* Wait until the phy drivers are available */
-			pd = of_phy_find_device(phy_np);
-			if (!pd)
-				goto defer;
-			bgx->lmac[lmac].phydev = pd;
+		if (phy_np) {
+			if (!of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
+				/* Wait until the phy drivers are available */
+				pd = of_phy_find_device(phy_np);
+				if (!pd) {
+					of_node_put(phy_np);
+					goto defer;
+				}
+				bgx->lmac[lmac].phydev = pd;
+			}
+			of_node_put(phy_np);
 		}
 
 		lmac++;
@@ -1313,11 +1519,11 @@ defer:
 	 * for phy devices we may have already found.
 	 */
 	while (lmac) {
+		lmac--;
 		if (bgx->lmac[lmac].phydev) {
 			put_device(&bgx->lmac[lmac].phydev->mdio.dev);
 			bgx->lmac[lmac].phydev = NULL;
 		}
-		lmac--;
 	}
 	of_node_put(node);
 	return -EPROBE_DEFER;
@@ -1340,6 +1546,48 @@ static int bgx_init_phy(struct bgx *bgx)
 	return bgx_init_of_phy(bgx);
 }
 
+static irqreturn_t bgx_intr_handler(int irq, void *data)
+{
+	struct bgx *bgx = (struct bgx *)data;
+	u64 status, val;
+	int lmac;
+
+	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+		status = bgx_reg_read(bgx, lmac, BGX_GMP_GMI_TXX_INT);
+		if (status & GMI_TXX_INT_UNDFLW) {
+			pci_err(bgx->pdev, "BGX%d lmac%d UNDFLW\n",
+				bgx->bgx_id, lmac);
+			val = bgx_reg_read(bgx, lmac, BGX_CMRX_CFG);
+			val &= ~CMR_EN;
+			bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
+			val |= CMR_EN;
+			bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
+		}
+		/* clear interrupts */
+		bgx_reg_write(bgx, lmac, BGX_GMP_GMI_TXX_INT, status);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void bgx_register_intr(struct pci_dev *pdev)
+{
+	struct bgx *bgx = pci_get_drvdata(pdev);
+	int ret;
+
+	ret = pci_alloc_irq_vectors(pdev, BGX_LMAC_VEC_OFFSET,
+				    BGX_LMAC_VEC_OFFSET, PCI_IRQ_ALL_TYPES);
+	if (ret < 0) {
+		pci_err(pdev, "Req for #%d msix vectors failed\n",
+			BGX_LMAC_VEC_OFFSET);
+		return;
+	}
+	ret = pci_request_irq(pdev, GMPX_GMI_TX_INT, bgx_intr_handler, NULL,
+			      bgx, "BGX%d", bgx->bgx_id);
+	if (ret)
+		pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
+}
+
 static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	int err;
@@ -1355,17 +1603,16 @@
 
 	pci_set_drvdata(pdev, bgx);
 
-	err = pci_enable_device(pdev);
+	err = pcim_enable_device(pdev);
 	if (err) {
-		dev_err(dev, "Failed to enable PCI device\n");
 		pci_set_drvdata(pdev, NULL);
-		return err;
+		return dev_err_probe(dev, err, "Failed to enable PCI device\n");
 	}
 
-	err = pci_request_regions(pdev, DRV_NAME);
+	err = pcim_request_all_regions(pdev, DRV_NAME);
 	if (err) {
 		dev_err(dev, "PCI request regions failed 0x%x\n", err);
-		goto err_disable_device;
+		goto err_zero_drv_data;
 	}
 
 	/* MAP configuration registers */
@@ -1373,7 +1620,7 @@
 	if (!bgx->reg_base) {
 		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
 		err = -ENOMEM;
-		goto err_release_regions;
+		goto err_zero_drv_data;
 	}
 
 	set_max_bgx_per_node(pdev);
@@ -1403,12 +1650,31 @@
 
 	bgx_get_qlm_mode(bgx);
 
+	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+		struct lmac *lmacp, **priv;
+
+		lmacp = &bgx->lmac[lmac];
+		lmacp->netdev = alloc_netdev_dummy(sizeof(struct lmac *));
+
+		if (!lmacp->netdev) {
+			for (int i = 0; i < lmac; i++)
+				free_netdev(bgx->lmac[i].netdev);
+			err = -ENOMEM;
+			goto err_enable;
+		}
+
+		priv = netdev_priv(lmacp->netdev);
+		*priv = lmacp;
+	}
+
 	err = bgx_init_phy(bgx);
 	if (err)
 		goto err_enable;
 
 	bgx_init_hw(bgx);
 
+	bgx_register_intr(pdev);
+
 	/* Enable all LMACs */
 	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
 		err = bgx_lmac_enable(bgx, lmac);
@@ -1425,10 +1691,8 @@
 
 err_enable:
 	bgx_vnic[bgx->bgx_id] = NULL;
-err_release_regions:
-	pci_release_regions(pdev);
-err_disable_device:
-	pci_disable_device(pdev);
+	pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
+err_zero_drv_data:
 	pci_set_drvdata(pdev, NULL);
 	return err;
 }
@@ -1439,12 +1703,14 @@ static void bgx_remove(struct pci_dev *pdev)
 	u8 lmac;
 
 	/* Disable all LMACs */
-	for (lmac = 0; lmac < bgx->lmac_count; lmac++)
+	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
 		bgx_lmac_disable(bgx, lmac);
+		free_netdev(bgx->lmac[lmac].netdev);
+	}
+
+	pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
 
 	bgx_vnic[bgx->bgx_id] = NULL;
 	pci_set_drvdata(pdev, NULL);
 }